chore: bring back store v1 to main (#20263)

Marko 2024-05-03 14:21:16 +02:00 committed by GitHub
parent 52dbcc171c
commit 94338e516d
203 changed files with 23273 additions and 691 deletions


@ -241,7 +241,3 @@ Those modules can be considered as part of the Cosmos SDK, but features and impr
### Modules that do not depend on the Cosmos SDK
Modules that do not depend on the Cosmos SDK can be released at any time from the `main` branch of the Cosmos SDK repository.
#### Exception to the rule
* Store v1 is released from the `release/v0.50.x` branch.


@ -16,6 +16,7 @@ use (
./simapp
./tests
./store
./store/v2
./tools/cosmovisor
./tools/confix
./tools/hubl


@ -8,20 +8,23 @@ schema = 3
version = "v1.32.0-20231117195010-33ed361a9051.1"
hash = "sha256-7T1OD5XbhEPpd9GTBihHsqLNBsAAd4aaxLQ8rmWboLk="
[mod."cloud.google.com/go"]
version = "v0.112.0"
hash = "sha256-lmNLoqmLURfxu+a6V/SeoP8xVn0Wi2SD7uxxAtSjm+o="
[mod."cloud.google.com/go/compute"]
version = "v1.24.0"
hash = "sha256-icDjR0uxYeazRbhsBgl8Dx7z/oRZJ/iqK6CGjCtsaQQ="
version = "v0.112.2"
hash = "sha256-Bk5MD40eefJlyUk96arLU/X1+EHItM7MjRPJtV0CU58="
[mod."cloud.google.com/go/auth"]
version = "v0.2.2"
hash = "sha256-xlhXKBwHDAGalOYhl6/ZSu+1LInr4xOdo4n0LfEEbuY="
[mod."cloud.google.com/go/auth/oauth2adapt"]
version = "v0.2.1"
hash = "sha256-meY0Sms2PtUe6kGYmVUbqCTkGJJmA05v0eN+OuZvKXs="
[mod."cloud.google.com/go/compute/metadata"]
version = "v0.2.3"
hash = "sha256-kYB1FTQRdTDqCqJzSU/jJYbVUGyxbkASUKbEs36FUyU="
version = "v0.3.0"
hash = "sha256-hj2Xjlz3vj7KYONZO/ItclWGGJEUgo5EvMEkGPfQi1Q="
[mod."cloud.google.com/go/iam"]
version = "v1.1.6"
hash = "sha256-u91oZdyy/wgk3J8Z+4mWmn+YliSBIATu6kpyH20Dd8k="
version = "v1.1.7"
hash = "sha256-+HdgBZbhH9ZdifQ2s1IZGnCOA8xwdnsB4AZTaV5m2IU="
[mod."cloud.google.com/go/storage"]
version = "v1.36.0"
hash = "sha256-dRKH1NEyAfEpVo5Mma677L7z0JO9Mfd1bv1lr1uFngI="
version = "v1.40.0"
hash = "sha256-/G0VKU/MZ2zBF+05UOsCOdzIf7LurwV9RtozVtiKkm0="
[mod."cosmossdk.io/errors"]
version = "v1.0.1"
hash = "sha256-MgTocXkBzri9FKkNtkARJXPmxRrRO/diQJS5ZzvYrJY="
@ -35,8 +38,8 @@ schema = 3
version = "v1.1.0"
hash = "sha256-CMV+5pn1BP5ixj9pICPlbIQ9Ezy6uP7mwG7SSTXkb3k="
[mod."cosmossdk.io/x/tx"]
version = "v0.13.1"
hash = "sha256-dHv2Zx8xbqfv1Gq0syh33G7TFJOVx4857QMH4b8Jj9k="
version = "v0.13.3"
hash = "sha256-7xN5da7GZ47oAX2zGaLcIdA5zPCn9VDLfsJWtbm3VFY="
[mod."filippo.io/edwards25519"]
version = "v1.1.0"
hash = "sha256-9ACANrgWZSd5HYPfDZHY8DVbPSC9LOMgy8deq3rDOoc="
@ -57,8 +60,8 @@ schema = 3
version = "v0.6.1"
hash = "sha256-BL0BVaHtmPKQts/711W59AbHXjGKqFS4ZTal0RYnR9I="
[mod."github.com/aws/aws-sdk-go"]
version = "v1.45.25"
hash = "sha256-ZzeU4WSHm5shDqGnK2mXC2p18NyAO+hKZHP7l1KR69k="
version = "v1.51.25"
hash = "sha256-eYGEnqYOVeTfwPuLiCYVgh5Bj2crnO6QUtNoLJBNPH0="
[mod."github.com/aymanbagabas/go-osc52/v2"]
version = "v2.0.1"
hash = "sha256-6Bp0jBZ6npvsYcKZGHHIUSVSTAMEyieweAX2YAKDjjg="
@ -75,17 +78,14 @@ schema = 3
version = "v1.10.0"
hash = "sha256-/Kkx33umYGS1keFnkmJ+DHgIAtkEDNI42nVpKYfUOTs="
[mod."github.com/btcsuite/btcd/btcec/v2"]
version = "v2.3.2"
hash = "sha256-natWs+yIAuD1UI07iZtjPilroQLfXizFn3lNOiOT83U="
[mod."github.com/cenkalti/backoff/v4"]
version = "v4.2.1"
hash = "sha256-CKogmPe0pCcAdpztzPwr24rLTJZfq8QVZ9AUduwAcoA="
version = "v2.3.3"
hash = "sha256-1L9u3uPeskDd8Lv8Eq54tXi8f5Vj/KwfT2i+qPCA+pg="
[mod."github.com/cespare/xxhash"]
version = "v1.1.0"
hash = "sha256-nVDTtXH9PC3yJ0THaQZEN243UP9xgLi/clt5xRqj3+M="
[mod."github.com/cespare/xxhash/v2"]
version = "v2.2.0"
hash = "sha256-nPufwYQfTkyrEkbBrpqM3C2vnMxfIz6tAaBmiUP7vd4="
version = "v2.3.0"
hash = "sha256-7hRlwSR+fos1kx4VZmJ/7snR7zHh8ZFKX+qqqqGcQpY="
[mod."github.com/chzyer/readline"]
version = "v1.5.1"
hash = "sha256-6wKd6/JZ9/O7FwSyNKE3KOt8fVPZEunqbTHQUxlOUNc="
@ -108,8 +108,8 @@ schema = 3
version = "v0.0.0-20230807174530-cc333fc44b06"
hash = "sha256-yZdBXkTVzPxRYntI9I2Gu4gkI11m52Nwl8RNNdlXSrA="
[mod."github.com/cometbft/cometbft"]
version = "v0.38.7-0.20240412124004-1f67e396cf45"
hash = "sha256-BYBhYbEiifnEfYzTDnV994yue23vM0wBgfDb18teVV8="
version = "v0.38.7"
hash = "sha256-mN7L3Q4xZ0YcwMIRkIsodCzRPBZqrSKD7VgDHBOWM98="
[mod."github.com/cometbft/cometbft-db"]
version = "v0.11.0"
hash = "sha256-qs3J+9ZW7gStN2W+5SQf/JSmgX5Q5kmIau1BLxze5iE="
@ -120,8 +120,8 @@ schema = 3
version = "v1.0.2"
hash = "sha256-WjDoB2AGoIyEW30LlGcQX5JVACJbs0jWSY58IuJHz0M="
[mod."github.com/cosmos/cosmos-proto"]
version = "v1.0.0-beta.4"
hash = "sha256-5Kn82nsZfiEtuwhhLZqmMxdAY1tX/Fi3HJ0/MEaRohw="
version = "v1.0.0-beta.5"
hash = "sha256-Fy/PbsOsd6iq0Njy3DVWK6HqWsogI+MkE8QslHGWyVg="
[mod."github.com/cosmos/go-bip39"]
version = "v1.0.0"
hash = "sha256-Qm2aC2vaS8tjtMUbHmlBSagOSqbduEEDwc51qvQaBmA="
@ -132,8 +132,8 @@ schema = 3
version = "v1.4.12"
hash = "sha256-e2tbfaZtzLijq+EMnNG9GWKDCG4sBj8wIVnn6/R26iM="
[mod."github.com/cosmos/iavl"]
version = "v1.1.1"
hash = "sha256-jeL74lBAld+9etm3mvcGNG8eHrjrMGdUsm69nb+yDcE="
version = "v1.1.2"
hash = "sha256-fhh5fN1BMDxbF4PobERMQdIb9vIrxaSl0tRXas0WKmc="
[mod."github.com/cosmos/ics23/go"]
version = "v0.10.0"
hash = "sha256-KYEv727BO/ht63JO02xiKFGFAddg41Ve9l2vSSZZBq0="
@ -155,9 +155,6 @@ schema = 3
[mod."github.com/decred/dcrd/dcrec/secp256k1/v4"]
version = "v4.3.0"
hash = "sha256-ADbhI5Ad+q3OxooIiYeLAq5mMONk1gPIAnTch9zKsIM="
[mod."github.com/desertbit/timer"]
version = "v0.0.0-20180107155436-c41aec40b27f"
hash = "sha256-abLOtEcomAqCWLphd2X6WkD/ED764w6sa6unox4BXss="
[mod."github.com/dgraph-io/badger/v2"]
version = "v2.2007.4"
hash = "sha256-+KwqZJZpViv8S3TqUVvPXrFoMgWFyS3NoLsi4RR5fGk="
@ -174,8 +171,8 @@ schema = 3
version = "v1.6.0"
hash = "sha256-IXn2BuUp4fi/i2zf1tGGW1m9xoYh3VCksB6GJ5Sf06g="
[mod."github.com/emicklei/dot"]
version = "v1.6.1"
hash = "sha256-zOpoaepCfPLmU9iQji/Ait+SVEHI9eF3rwtW0h/8lho="
version = "v1.6.2"
hash = "sha256-X7aNKLKZ7pJBG/wdP+TWuQnlNLNdbUDd+kC5kF4uBtU="
[mod."github.com/fatih/color"]
version = "v1.16.0"
hash = "sha256-Aq/SM28aPJVzvapllQ64R/DM4aZ5CHPewcm/AUJPyJQ="
@ -246,8 +243,8 @@ schema = 3
version = "v0.3.2"
hash = "sha256-wVuR3QC0mYFl5LNeKdRXdKdod7BGP5sv2h6VVib85v8="
[mod."github.com/googleapis/gax-go/v2"]
version = "v2.12.0"
hash = "sha256-ZcXS+1B11UaJHf8D15N3ZCh00fiMUncpHd+eNRffLZ4="
version = "v2.12.3"
hash = "sha256-FSlL1GXLe/e7gol/D9GOp3iC04s58UtDXcwiKSalUwE="
[mod."github.com/gorilla/handlers"]
version = "v1.5.2"
hash = "sha256-2WQeVCe7vQg+8MpNLMhOGsRdbrcWLpbtUhUX8mbiQrs="
@ -270,8 +267,8 @@ schema = 3
version = "v0.5.2"
hash = "sha256-N9GOKYo7tK6XQUFhvhImtL7PZW/mr4C4Manx/yPVvcQ="
[mod."github.com/hashicorp/go-getter"]
version = "v1.7.3"
hash = "sha256-z3zrjcOsgJrZkGLwaKVauq/MFAPtulXatV/RrkKNJv4="
version = "v1.7.4"
hash = "sha256-GtJSwcS1WXLn9lFAuTRCseIQBXJOElAywEhTtYrsfbE="
[mod."github.com/hashicorp/go-hclog"]
version = "v1.6.2"
hash = "sha256-cGlKyuctpU6Jd+L1ybCoJrBwnBlHXks4CQYkTQMCxDU="
@ -308,9 +305,6 @@ schema = 3
[mod."github.com/iancoleman/strcase"]
version = "v0.3.0"
hash = "sha256-lVOk4klrikSCUviR16qcyAr6eoIbniUSfsLFOE1ZLpk="
[mod."github.com/improbable-eng/grpc-web"]
version = "v0.15.0"
hash = "sha256-9oqKb5Y3hjleOFE2BczbEzLH6q2Jg7kUTP/M8Yk4Ne4="
[mod."github.com/inconshreveable/mousetrap"]
version = "v1.1.0"
hash = "sha256-XWlYH0c8IcxAwQTnIi6WYqq44nOKUylSWxWO/vi+8pE="
@ -321,8 +315,8 @@ schema = 3
version = "v1.0.0"
hash = "sha256-xEd0mDBeq3eR/GYeXjoTVb2sPs8sTCosn5ayWkcgENI="
[mod."github.com/klauspost/compress"]
version = "v1.17.7"
hash = "sha256-UkW+tAFEZYj067z9gXDQjQx0dCF8noCn5OSw/APh4oo="
version = "v1.17.8"
hash = "sha256-8rgCCfHX29le8m6fyVn6gwFde5TPUHjwQqZqv9JIubs="
[mod."github.com/kr/pretty"]
version = "v0.3.1"
hash = "sha256-DlER7XM+xiaLjvebcIPiB12oVNjyZHuJHoRGITzzpKU="
@ -384,11 +378,11 @@ schema = 3
version = "v1.1.0"
hash = "sha256-U4IS0keJa4BSBSeEBqtIV1Zg6N4b0zFiKfzN9ua4pWQ="
[mod."github.com/pelletier/go-toml/v2"]
version = "v2.2.0"
hash = "sha256-hbjvVGN0puPwpMq2bDlixeuUj8+we3BOgwZuY7/SM+Y="
version = "v2.2.1"
hash = "sha256-gmQ4CTz/MI97D3pYqU7mpxqo8gBTDccQ1Cp0lAMmJUc="
[mod."github.com/petermattis/goid"]
version = "v0.0.0-20231207134359-e60b3f734c67"
hash = "sha256-73DbyhUTwYhqmvbcI96CNblTrfl6uz9OvM6z/h8j5TM="
version = "v0.0.0-20240327183114-c42a807a84ba"
hash = "sha256-f2enuVnb6nrQX0uBc3WYEK68TiLUp4Y1qisx84ElaFA="
[mod."github.com/pkg/errors"]
version = "v0.9.1"
hash = "sha256-mNfQtcrQmu3sNg/7IwiieKWOgFQOVVe2yXgKBpe/wZw="
@ -399,14 +393,14 @@ schema = 3
version = "v1.19.0"
hash = "sha256-YV8sxMPR+xorTUCriTfcFsaV2b7PZfPJDQmOgUYOZJo="
[mod."github.com/prometheus/client_model"]
version = "v0.6.0"
hash = "sha256-TAD0mm7msYHo99yoNijeYzlDD0i1Vg3uTetpkDUWQo8="
version = "v0.6.1"
hash = "sha256-rIDyUzNfxRA934PIoySR0EhuBbZVRK/25Jlc/r8WODw="
[mod."github.com/prometheus/common"]
version = "v0.52.2"
hash = "sha256-XQUvk9/Kwf9NDlDUVl7mOWRD7z7z9QEbLH/rNU4D2nI="
version = "v0.53.0"
hash = "sha256-IO5DnFEYXNe5nfumAebAuiZjNaJlTiHTD0GOMqNT26o="
[mod."github.com/prometheus/procfs"]
version = "v0.13.0"
hash = "sha256-J31K36TkIiQU2EGOcmqDa+dkoKXiVuxafPVT4rKbEsg="
version = "v0.14.0"
hash = "sha256-NZfiTx9g098TFnsA1Q/niXxTqybkbNG1BItaXSiRsnQ="
[mod."github.com/rcrowley/go-metrics"]
version = "v0.0.0-20201227073835-cf1acfcdf475"
hash = "sha256-10ytHQ1SpMKYTiKuOPdEMuOVa8HVvv9ryYSIF9BHEBI="
@ -466,8 +460,8 @@ schema = 3
version = "v1.7.0"
hash = "sha256-bnr6c7a0nqo2HyGqxHk0kEZCEsjLYkPbAVY9WzaZ30o="
[mod."github.com/ulikunitz/xz"]
version = "v0.5.11"
hash = "sha256-SUyrjc2wyN3cTGKe5JdBEXjtZC1rJySRxJHVUZ59row="
version = "v0.5.12"
hash = "sha256-i8IGHLdPZkKsmgHNB2cHHI4/493tJh7uiBzoKXXXgOA="
[mod."github.com/zondax/hid"]
version = "v0.9.2"
hash = "sha256-9h1gEJ/loyaJvu9AsmslztiA8U9ixDTC6TBw9lCU2BE="
@ -487,20 +481,20 @@ schema = 3
version = "v0.24.0"
hash = "sha256-4H+mGZgG2c9I1y0m8avF4qmt8LUKxxVsTqR8mKgP4yo="
[mod."go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"]
version = "v0.47.0"
hash = "sha256-D+bP2jEZcB4S8AprlDM3qAghYtxhqc8fSKZNac6WVFs="
version = "v0.50.0"
hash = "sha256-ucFsEVD4lTfUkBjIpJRJrjaX2lRb9lLKJSadPrcCz3I="
[mod."go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"]
version = "v0.47.0"
hash = "sha256-Pv1X0oIWYXyVxEaDQmgYcw+49I9+65N9Y+1wbxoXOog="
version = "v0.50.0"
hash = "sha256-2ZZp2ADv9q6+3CCS2KLBWr+UBhNAd/x8ixov/CA3LJw="
[mod."go.opentelemetry.io/otel"]
version = "v1.22.0"
hash = "sha256-4K70RPjaPzPpTO/VkE9ueoSo9EANuNXneDR6jEiUaJQ="
version = "v1.25.0"
hash = "sha256-HMruryzhsHkfVwEt1TE2Wj2r1z0U6xFAsVTPFihpn/k="
[mod."go.opentelemetry.io/otel/metric"]
version = "v1.22.0"
hash = "sha256-Lb4wdlZNmz6Ut6CljBAePSUA8X0RBEOEDyOl2oO+pL8="
version = "v1.25.0"
hash = "sha256-s4hqgIhkIDA8oN0e8aWMFkgkm68RDZUzT0lUxEdanaw="
[mod."go.opentelemetry.io/otel/trace"]
version = "v1.22.0"
hash = "sha256-38zzkmcoOzYYeDN+rC44HmwmdnalIcEpObCS6tIvMO8="
version = "v1.25.0"
hash = "sha256-sz8Pv8Yy0gGM82Tfo0X+48mUU/wiD7t9riaBrmgeb8E="
[mod."go.uber.org/multierr"]
version = "v1.11.0"
hash = "sha256-Lb6rHHfR62Ozg2j2JZy3MKOMKdsfzd1IYTR57r3Mhp0="
@ -508,17 +502,17 @@ schema = 3
version = "v0.22.0"
hash = "sha256-2+u9nd32+Bi7EEv7QFc12CRTbfV7DApNv+yKIr7+lTw="
[mod."golang.org/x/exp"]
version = "v0.0.0-20240318143956-a85f2c67cd81"
hash = "sha256-HjEmpjgXqIuGc5rsTVkg6OPAN2fGhfkska7cwRu1qaw="
version = "v0.0.0-20240416160154-fe59bbe5cc7f"
hash = "sha256-168CD9hlLJaQ7stQk/ztlP3zgaWXUMbIHa38gAeRRs4="
[mod."golang.org/x/mod"]
version = "v0.16.0"
hash = "sha256-aN1Cz5Wqd9YCjK8nFW6JWn+n1HfFoEcgYZmGO/FYtbw="
version = "v0.17.0"
hash = "sha256-CLaPeF6uTFuRDv4oHwOQE6MCMvrzkUjWN3NuyywZjKU="
[mod."golang.org/x/net"]
version = "v0.24.0"
hash = "sha256-w1c21ljta5wNIyel9CSIn/crPzwOCRofNKhqmfs4aEQ="
[mod."golang.org/x/oauth2"]
version = "v0.18.0"
hash = "sha256-TX4CvtvHU+SGSmqlxaQqlgJjlJiOtLGYAZa0zeBfZak="
version = "v0.19.0"
hash = "sha256-IYdkq8R8BXnwoBt/ZLAMJr0DkLZDMVkjeBJNQ/Z9Bes="
[mod."golang.org/x/sync"]
version = "v0.7.0"
hash = "sha256-2ETllEu2GDWoOd/yMkOkLC2hWBpKzbVZ8LhjLu0d2A8="
@ -535,29 +529,26 @@ schema = 3
version = "v0.5.0"
hash = "sha256-W6RgwgdYTO3byIPOFxrP2IpAZdgaGowAaVfYby7AULU="
[mod."golang.org/x/tools"]
version = "v0.19.0"
hash = "sha256-Xf05Ao398gBzxn5C8H6x+XsLjFLIm+UUfpDekQYA0cw="
version = "v0.20.0"
hash = "sha256-g5T5FrNPO/cf2W1lc+/93FcFB3HftPjqI72FueD9Wt8="
[mod."google.golang.org/api"]
version = "v0.162.0"
hash = "sha256-+AsT4DPjefEmPPelZoSHuQ8nCHhmhhUWU4UGnJ/8+fg="
[mod."google.golang.org/appengine"]
version = "v1.6.8"
hash = "sha256-decMa0MiWfW/Bzr8QPPzzpeya0YWGHhZAt4Cr/bD1wQ="
version = "v0.175.0"
hash = "sha256-0NVK3UxAm8Sp8mux2GHeD4rA97u37U7yuE9vDd+wJlg="
[mod."google.golang.org/genproto"]
version = "v0.0.0-20240227224415-6ceb2ff114de"
hash = "sha256-G+tvsCTXxzk3sS6HbBxPN1DYaN1tPOqKsa60mI05Feg="
version = "v0.0.0-20240415180920-8c6c420018be"
hash = "sha256-IkbfNXhXtoemc7KWZQncKR/ykLpmfW7yb/P6ULMWEek="
[mod."google.golang.org/genproto/googleapis/api"]
version = "v0.0.0-20240227224415-6ceb2ff114de"
hash = "sha256-H3d2ZhPJI9RH5EK9NsxUAFmT6tr2DgGV9SjZgqJ80r4="
version = "v0.0.0-20240415180920-8c6c420018be"
hash = "sha256-0Bc66Utj1rydwYngQxTQoTyg1Td2D+nIxukc0zz7XFc="
[mod."google.golang.org/genproto/googleapis/rpc"]
version = "v0.0.0-20240401170217-c3f982113cda"
version = "v0.0.0-20240415180920-8c6c420018be"
hash = "sha256-P5SBku16dYnK4koUQxTeGwPxAAWH8rxbDm2pOzFLo/Q="
[mod."google.golang.org/grpc"]
version = "v1.63.2"
hash = "sha256-RmtVjYLam97k7IHTHU7Gn16xNX+GvA9AiLKlQwOiZXU="
[mod."google.golang.org/protobuf"]
version = "v1.33.0"
hash = "sha256-cWwQjtUwSIEkAlAadrlxK1PYZXTRrV4NKzt7xDpJgIU="
version = "v1.34.0"
hash = "sha256-0fqsqQTyOicm4+NiuAf2IaJhavMVJh50VhNcRmZPSus="
[mod."gopkg.in/ini.v1"]
version = "v1.67.0"
hash = "sha256-V10ahGNGT+NLRdKUyRg1dos5RxLBXBk1xutcnquc/+4="
@ -567,9 +558,6 @@ schema = 3
[mod."gotest.tools/v3"]
version = "v3.5.1"
hash = "sha256-ps2GEc3P2xvlrU4TCtXz+nLTxyP0RrF7SScz5jUqE5E="
[mod."nhooyr.io/websocket"]
version = "v1.8.10"
hash = "sha256-EsUWUFIA2uJTap1DfsYuSxlPMH3UHDpxEohJMalDOcI="
[mod."pgregory.net/rapid"]
version = "v1.1.0"
hash = "sha256-sVQY9EQ9Y5blYyVYfaOa+y12e+399OqdHiEY3BaDnqo="


@ -23,46 +23,44 @@ Ref: https://keepachangelog.com/en/1.0.0/
# Changelog
## [Unreleased]
## v1.1.0 (March 20, 2024)
### Improvements
* [#19770](https://github.com/cosmos/cosmos-sdk/pull/19770) Upgrade IAVL to v1.1.1.
## v1.0.2 (January 10, 2024)
### Bug Fixes
* [#18897](https://github.com/cosmos/cosmos-sdk/pull/18897) Replace panic in pruning to avoid consensus halting.
## v1.0.1 (November 28, 2023)
### Bug Fixes
* [#18563](https://github.com/cosmos/cosmos-sdk/pull/18563) `LastCommitID().Hash` will always return `sha256([]byte{})` if the store is empty.
## v1.0.0 (October 31, 2023)
### Features
* [#17294](https://github.com/cosmos/cosmos-sdk/pull/17294) Add snapshot manager Close method.
* [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) Migrate `iavl` to the new key format.
    * Remove `DeleteVersion`, `DeleteVersions`, `LazyLoadVersionForOverwriting` from the `iavl` tree API.
    * Add `DeleteVersionsTo` and `SaveChangeSet`, which keep versions sequential, from `fromVersion` to `toVersion`.
    * Refactor the pruning manager to use `DeleteVersionsTo`.
* [#15712](https://github.com/cosmos/cosmos-sdk/pull/15712) Add `WorkingHash` function to the store interface to get the current app hash before commit.
* [#14645](https://github.com/cosmos/cosmos-sdk/pull/14645) Add a limit to the length of keys and values.
* [#15683](https://github.com/cosmos/cosmos-sdk/pull/15683) `rootmulti.Store.CacheMultiStoreWithVersion` can now handle loading archival states that don't persist any of the module stores the current state has.
* [#16060](https://github.com/cosmos/cosmos-sdk/pull/16060) Support saving and restoring snapshots locally.
* [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) The `store` module is extracted to have a separate go.mod file, which allows it to be a standalone module.
* [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` validates that all module store heights are correct; it errors if any module store has an incorrect height.
### Improvements
* [#17158](https://github.com/cosmos/cosmos-sdk/pull/17158) Start the snapshot goroutine only when a snapshot needs to be created.
### Bug fixes
* [#18651](https://github.com/cosmos/cosmos-sdk/pull/18651) Propagate `iavl.MutableTree.Remove` errors to the caller instead of returning a synthesized error first.
## [v1.0.0-alpha.1](https://github.com/cosmos/cosmos-sdk/releases/tag/store%2Fv1.0.0-alpha.1) - 2023-07-11
### Features
* [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) Migrate `iavl` to the new key format.
    * Remove `DeleteVersion`, `DeleteVersions`, `LazyLoadVersionForOverwriting` from the `iavl` tree API.
    * Add `DeleteVersionsTo`, which keeps versions sequential, from `fromVersion` to `toVersion`.
    * Refactor the pruning manager to use `DeleteVersionsTo`.
* [#15712](https://github.com/cosmos/cosmos-sdk/pull/15712) Add `WorkingHash` function to the store interface to get the current app hash before commit.
* [#15432](https://github.com/cosmos/cosmos-sdk/pull/15432) Add `TraverseStateChanges` to the store interface to get the state changes between two versions.
* [#14645](https://github.com/cosmos/cosmos-sdk/pull/14645) Add a limit to the length of keys and values.
* [#15683](https://github.com/cosmos/cosmos-sdk/pull/15683) `rootmulti.Store.CacheMultiStoreWithVersion` can now handle loading archival states that don't persist any of the module stores the current state has.
* [#16060](https://github.com/cosmos/cosmos-sdk/pull/16060) Support saving and restoring snapshots locally.
### API Breaking Changes
* [#16321](https://github.com/cosmos/cosmos-sdk/pull/16321) QueryInterface defines its own request and response types instead of relying on comet/abci, and returns an error.
### Bug Fixes
* [#16588](https://github.com/cosmos/cosmos-sdk/pull/16588) Propagate the Snapshotter's failure to the caller (previously it would silently create an empty snapshot).
## [v0.1.0-alpha.1](https://github.com/cosmos/cosmos-sdk/releases/tag/store%2Fv0.1.0-alpha.1) - 2023-03-17
### Features
* [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) The `store` module is extracted to have a separate go.mod file, which allows it to be a standalone module.
* [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` validates that all module store heights are correct; it errors if any module store has an incorrect height.

store/cache/benchmark_test.go (new file)

@ -0,0 +1,49 @@
package cache
import (
"testing"
"cosmossdk.io/store/types"
)
func freshMgr() *CommitKVStoreCacheManager {
return &CommitKVStoreCacheManager{
caches: map[string]types.CommitKVStore{
"a1": nil,
"alalalalalal": nil,
},
}
}
func populate(mgr *CommitKVStoreCacheManager) {
mgr.caches["this one"] = (types.CommitKVStore)(nil)
mgr.caches["those ones are the ones"] = (types.CommitKVStore)(nil)
mgr.caches["very huge key right here and there are we going to ones are the ones"] = (types.CommitKVStore)(nil)
}
func BenchmarkReset(b *testing.B) {
b.ReportAllocs()
mgr := freshMgr()
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
mgr.Reset()
if len(mgr.caches) != 0 {
b.Fatal("Reset failed")
}
populate(mgr)
if len(mgr.caches) == 0 {
b.Fatal("populate failed")
}
mgr.Reset()
if len(mgr.caches) != 0 {
b.Fatal("Reset failed")
}
}
if mgr == nil {
b.Fatal("Impossible condition")
}
}

store/cache/cache.go (new file)

@ -0,0 +1,132 @@
package cache
import (
"fmt"
lru "github.com/hashicorp/golang-lru"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/types"
)
var (
_ types.CommitKVStore = (*CommitKVStoreCache)(nil)
_ types.MultiStorePersistentCache = (*CommitKVStoreCacheManager)(nil)
// DefaultCommitKVStoreCacheSize defines the persistent ARC cache size for a
// CommitKVStoreCache.
DefaultCommitKVStoreCacheSize uint = 1000
)
type (
// CommitKVStoreCache implements an inter-block (persistent) cache that wraps a
// CommitKVStore. Reads first hit the internal ARC (Adaptive Replacement Cache).
// During a cache miss, the read is delegated to the underlying CommitKVStore
// and cached. Deletes and writes always happen to both the cache and the
// CommitKVStore in a write-through manner. Caching performed in the
// CommitKVStore and below is completely irrelevant to this layer.
CommitKVStoreCache struct {
types.CommitKVStore
cache *lru.ARCCache
}
// CommitKVStoreCacheManager maintains a mapping from a StoreKey to a
// CommitKVStoreCache. Each CommitKVStore, per StoreKey, is meant to be used
// in an inter-block (persistent) manner and typically provided by a
// CommitMultiStore.
CommitKVStoreCacheManager struct {
cacheSize uint
caches map[string]types.CommitKVStore
}
)
func NewCommitKVStoreCache(store types.CommitKVStore, size uint) *CommitKVStoreCache {
cache, err := lru.NewARC(int(size))
if err != nil {
panic(fmt.Errorf("failed to create KVStore cache: %s", err))
}
return &CommitKVStoreCache{
CommitKVStore: store,
cache: cache,
}
}
func NewCommitKVStoreCacheManager(size uint) *CommitKVStoreCacheManager {
return &CommitKVStoreCacheManager{
cacheSize: size,
caches: make(map[string]types.CommitKVStore),
}
}
// GetStoreCache returns a Cache from the CommitKVStoreCacheManager for a given
// StoreKey. If no Cache exists for the StoreKey, then one is created and set.
// The returned Cache is meant to be used in a persistent manner.
func (cmgr *CommitKVStoreCacheManager) GetStoreCache(key types.StoreKey, store types.CommitKVStore) types.CommitKVStore {
if cmgr.caches[key.Name()] == nil {
cmgr.caches[key.Name()] = NewCommitKVStoreCache(store, cmgr.cacheSize)
}
return cmgr.caches[key.Name()]
}
// Unwrap returns the underlying CommitKVStore for a given StoreKey.
func (cmgr *CommitKVStoreCacheManager) Unwrap(key types.StoreKey) types.CommitKVStore {
if ckv, ok := cmgr.caches[key.Name()]; ok {
return ckv.(*CommitKVStoreCache).CommitKVStore
}
return nil
}
// Reset resets the internal caches.
func (cmgr *CommitKVStoreCacheManager) Reset() {
// Clear the map.
// Please note that we are purposefully using the map clearing idiom.
// See https://github.com/cosmos/cosmos-sdk/issues/6681.
for key := range cmgr.caches {
delete(cmgr.caches, key)
}
}
// CacheWrap implements the CacheWrapper interface
func (ckv *CommitKVStoreCache) CacheWrap() types.CacheWrap {
return cachekv.NewStore(ckv)
}
// Get retrieves a value by key. It will first look in the write-through cache.
// If the value doesn't exist in the write-through cache, the query is delegated
// to the underlying CommitKVStore.
func (ckv *CommitKVStoreCache) Get(key []byte) []byte {
types.AssertValidKey(key)
keyStr := string(key)
valueI, ok := ckv.cache.Get(keyStr)
if ok {
// cache hit
return valueI.([]byte)
}
// cache miss; write to cache
value := ckv.CommitKVStore.Get(key)
ckv.cache.Add(keyStr, value)
return value
}
// Set inserts a key/value pair into both the write-through cache and the
// underlying CommitKVStore.
func (ckv *CommitKVStoreCache) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
ckv.cache.Add(string(key), value)
ckv.CommitKVStore.Set(key, value)
}
// Delete removes a key/value pair from both the write-through cache and the
// underlying CommitKVStore.
func (ckv *CommitKVStoreCache) Delete(key []byte) {
ckv.cache.Remove(string(key))
ckv.CommitKVStore.Delete(key)
}

store/cache/cache_test.go (new file)

@ -0,0 +1,101 @@
package cache_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cache"
"cosmossdk.io/store/cachekv"
iavlstore "cosmossdk.io/store/iavl"
"cosmossdk.io/store/types"
"cosmossdk.io/store/wrapper"
)
func TestGetOrSetStoreCache(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
sKey := types.NewKVStoreKey("test")
tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
store := iavlstore.UnsafeNewStore(tree)
store2 := mngr.GetStoreCache(sKey, store)
require.NotNil(t, store2)
require.Equal(t, store2, mngr.GetStoreCache(sKey, store))
}
func TestUnwrap(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
sKey := types.NewKVStoreKey("test")
tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
store := iavlstore.UnsafeNewStore(tree)
_ = mngr.GetStoreCache(sKey, store)
require.Equal(t, store, mngr.Unwrap(sKey))
require.Nil(t, mngr.Unwrap(types.NewKVStoreKey("test2")))
}
func TestStoreCache(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
sKey := types.NewKVStoreKey("test")
tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
store := iavlstore.UnsafeNewStore(tree)
kvStore := mngr.GetStoreCache(sKey, store)
for i := uint(0); i < cache.DefaultCommitKVStoreCacheSize*2; i++ {
key := []byte(fmt.Sprintf("key_%d", i))
value := []byte(fmt.Sprintf("value_%d", i))
kvStore.Set(key, value)
res := kvStore.Get(key)
require.Equal(t, res, value)
require.Equal(t, res, store.Get(key))
kvStore.Delete(key)
require.Nil(t, kvStore.Get(key))
require.Nil(t, store.Get(key))
}
}
func TestReset(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
sKey := types.NewKVStoreKey("test")
tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
store := iavlstore.UnsafeNewStore(tree)
store2 := mngr.GetStoreCache(sKey, store)
require.NotNil(t, store2)
require.Equal(t, store2, mngr.GetStoreCache(sKey, store))
// reset and check if the cache is gone
mngr.Reset()
require.Nil(t, mngr.Unwrap(sKey))
// check if the cache is recreated
require.Equal(t, store2, mngr.GetStoreCache(sKey, store))
}
func TestCacheWrap(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
mngr := cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
sKey := types.NewKVStoreKey("test")
tree := iavl.NewMutableTree(db, 100, false, log.NewNopLogger())
store := iavlstore.UnsafeNewStore(tree)
cacheWrapper := mngr.GetStoreCache(sKey, store).CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
}

store/cachekv/README.md (new file)

@ -0,0 +1,140 @@
# CacheKVStore specification
A `CacheKVStore` is a cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposal of changes (e.g. after processing a failed transaction).
The core goals the CacheKVStore seeks to achieve are:
* Buffer all writes to the parent store, so they can be dropped if they need to be reverted
* Allow iteration over contiguous spans of keys
* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O)
* Note: We actually fail to achieve this for iteration right now
* Note: Need to consider this getting too large and dropping some cached reads
* Make subsequent reads account for prior buffered writes
* Write all buffered changes to the parent store
We should revisit these goals with time (for instance it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status.
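Before the internals, a minimal usage sketch of the buffering behavior described above (hedged: the `dbadapter` wiring mirrors the tests added elsewhere in this commit, not an example from this README):
```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/cachekv"
	"cosmossdk.io/store/dbadapter"
)

func main() {
	parent := dbadapter.Store{DB: dbm.NewMemDB()}
	store := cachekv.NewStore(parent)

	// The write is buffered in the cache; the parent is untouched.
	store.Set([]byte("k"), []byte("v"))
	fmt.Println(parent.Get([]byte("k")) == nil) // true

	// Write flushes all buffered changes to the parent.
	store.Write()
	fmt.Println(string(parent.Get([]byte("k")))) // v
}
```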
## Types and Structs
```go
type Store struct {
mtx sync.Mutex
cache map[string]*cValue
deleted map[string]struct{}
unsortedCache map[string]struct{}
sortedCache *dbm.MemDB // always ascending sorted
parent types.KVStore
}
```
The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. A mutex is used because IAVL trees (the `KVStore` in the application) are not safe for concurrent use.
### `cache`
The main mapping of key-value pairs stored in cache. This map contains both keys that are cached from read operations as well as dirty keys which map to a value that is potentially different than what is in the underlying `KVStore`.
Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`.
```go
type cValue struct {
value []byte
dirty bool
}
```
### `deleted`
Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set.
### `unsortedCache`
Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set.
### `sortedCache`
A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order.
## CRUD Operations and Writing
The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it).
`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted.
### `Get`
`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`.
### `Has`
`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read.
### `Set`
New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`.
Calls `setCacheValue()` with `deleted=false` and `dirty=true`.
### `Delete`
A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`.
Calls `setCacheValue()` with `deleted=true` and `dirty=true`.
### `Write`
Key-value pairs in the cache are written to `parent` in ascending order of their keys.
A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`.
If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache.
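A minimal sketch of this write-back loop, under the assumption that a deletion is represented by a nil cached value (as in the `store.go` added later in this commit); `flushToParent` is a name introduced here for illustration, not the verbatim implementation:
```go
package cachekv

import (
	"sort"

	"cosmossdk.io/store/types"
)

// flushToParent applies dirty cache entries to the parent in ascending key
// order; a nil cached value marks a deletion.
func flushToParent(cache map[string]*cValue, parent types.KVStore) {
	keys := make([]string, 0, len(cache))
	for key, v := range cache {
		if v.dirty {
			keys = append(keys, key)
		}
	}
	sort.Strings(keys)

	for _, key := range keys {
		if v := cache[key]; v.value == nil {
			parent.Delete([]byte(key))
		} else {
			parent.Set([]byte(key), v.value)
		}
	}
}
```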
## Iteration
Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache.
In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching).
[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache.
### Implementation Overview
Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator.
Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested.
Generating the cache iterator can be decomposed into four parts:
1. Finding all keys that exist in the range we are iterating over
2. Sorting this list of keys
3. Inserting these keys into `sortedCache` and removing them from `unsortedCache`
4. Returning an iterator over `sortedCache` with the desired range
Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows.
If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys.
```go
n := len(store.unsortedCache)
unsorted := make([]*kv.Pair, 0)
if n < minSortSize {
for key := range store.unsortedCache {
if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
cacheValue := store.cache[key]
unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
}
}
store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
return
}
```
Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`.
At this point, part 3. is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`).
In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`sortedState`) is used to skip the sorting step in the function.
Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`.
As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls.
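To make the merged iteration concrete, a hedged example (again using the `dbadapter` wiring from this commit's tests): dirty cache values shadow the parent, deletions hide parent keys, and keys stream in a shared lexicographic order.
```go
package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/cachekv"
	"cosmossdk.io/store/dbadapter"
)

func main() {
	parent := dbadapter.Store{DB: dbm.NewMemDB()}
	parent.Set([]byte("a"), []byte("old"))
	parent.Set([]byte("b"), []byte("old"))

	store := cachekv.NewStore(parent)
	store.Set([]byte("b"), []byte("new")) // shadows the parent's "b"
	store.Delete([]byte("a"))             // hides the parent's "a"

	it := store.Iterator(nil, nil)
	defer it.Close()
	for ; it.Valid(); it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value()) // prints only: b=new
	}
}
```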


@ -0,0 +1,44 @@
package cachekv_test
import "crypto/rand"
func randSlice(sliceSize int) []byte {
bz := make([]byte, sliceSize)
_, _ = rand.Read(bz)
return bz
}
func incrementByteSlice(bz []byte) {
for index := len(bz) - 1; index >= 0; index-- {
if bz[index] < 255 {
bz[index]++
break
} else {
bz[index] = 0
}
}
}
// generateSequentialKeys generates numKeys keys in sequential order, starting at startKey
func generateSequentialKeys(startKey []byte, numKeys int) [][]byte {
toReturn := make([][]byte, 0, numKeys)
cur := make([]byte, len(startKey))
copy(cur, startKey)
for i := 0; i < numKeys; i++ {
newKey := make([]byte, len(startKey))
copy(newKey, cur)
toReturn = append(toReturn, newKey)
incrementByteSlice(cur)
}
return toReturn
}
// Generate many random, unsorted keys
func generateRandomKeys(keySize, numKeys int) [][]byte {
toReturn := make([][]byte, 0, numKeys)
for i := 0; i < numKeys; i++ {
newKey := randSlice(keySize)
toReturn = append(toReturn, newKey)
}
return toReturn
}


@ -0,0 +1,136 @@
package cachekv_test
import (
fmt "fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/types"
)
func DoBenchmarkDeepCacheStack(b *testing.B, depth int) {
b.Helper()
db := dbm.NewMemDB()
initialStore := cachekv.NewStore(dbadapter.Store{DB: db})
nItems := 20
for i := 0; i < nItems; i++ {
initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0})
}
var stack CacheStack
stack.Reset(initialStore)
for i := 0; i < depth; i++ {
stack.Snapshot()
store := stack.CurrentStore()
store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)})
}
store := stack.CurrentStore()
b.ResetTimer()
for i := 0; i < b.N; i++ {
it := store.Iterator(nil, nil)
items := make([][]byte, 0, nItems)
for ; it.Valid(); it.Next() {
items = append(items, it.Key())
it.Value()
}
it.Close()
require.Equal(b, nItems, len(items))
}
}
func BenchmarkDeepCacheStack1(b *testing.B) {
DoBenchmarkDeepCacheStack(b, 1)
}
func BenchmarkDeepCacheStack3(b *testing.B) {
DoBenchmarkDeepCacheStack(b, 3)
}
func BenchmarkDeepCacheStack10(b *testing.B) {
DoBenchmarkDeepCacheStack(b, 10)
}
func BenchmarkDeepCacheStack13(b *testing.B) {
DoBenchmarkDeepCacheStack(b, 13)
}
// CacheStack manages a stack of nested cache stores to
// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods.
type CacheStack struct {
initialStore types.CacheKVStore
// Context of the initial state before transaction execution.
// It's the context used by `StateDB.CommittedState`.
cacheStores []types.CacheKVStore
}
// CurrentStore returns the top store of the cache stack;
// if the stack is empty, it returns the initial store.
func (cs *CacheStack) CurrentStore() types.CacheKVStore {
l := len(cs.cacheStores)
if l == 0 {
return cs.initialStore
}
return cs.cacheStores[l-1]
}
// Reset sets the initial store and clears the cache store stack.
func (cs *CacheStack) Reset(initialStore types.CacheKVStore) {
cs.initialStore = initialStore
cs.cacheStores = nil
}
// IsEmpty returns true if the cache context stack is empty.
func (cs *CacheStack) IsEmpty() bool {
return len(cs.cacheStores) == 0
}
// Commit commits all the cached stores from top to bottom in order and clears the stack by setting an empty slice of cache stores.
func (cs *CacheStack) Commit() {
// commit in order from top to bottom
for i := len(cs.cacheStores) - 1; i >= 0; i-- {
cs.cacheStores[i].Write()
}
cs.cacheStores = nil
}
// CommitToRevision commits the caches above the target revision,
// to improve the efficiency of db operations.
func (cs *CacheStack) CommitToRevision(target int) error {
if target < 0 || target >= len(cs.cacheStores) {
return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))
}
// commit in order from top to bottom
for i := len(cs.cacheStores) - 1; i > target; i-- {
cs.cacheStores[i].Write()
}
cs.cacheStores = cs.cacheStores[0 : target+1]
return nil
}
// Snapshot pushes a new cached context to the stack,
// and returns the index of it.
func (cs *CacheStack) Snapshot() int {
cs.cacheStores = append(cs.cacheStores, cachekv.NewStore(cs.CurrentStore()))
return len(cs.cacheStores) - 1
}
// RevertToSnapshot pops all the cached stores from the target index (inclusive) upward.
// The target should be a snapshot index returned by `Snapshot`.
// This function panics if the index is out of bounds.
func (cs *CacheStack) RevertToSnapshot(target int) {
if target < 0 || target >= len(cs.cacheStores) {
panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)))
}
cs.cacheStores = cs.cacheStores[:target]
}
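A hedged usage sketch of the `CacheStack` above (assuming it sits in the same `cachekv_test` package as this benchmark file, so the imports above apply; `ExampleCacheStack` is a name introduced here for illustration):
```go
func ExampleCacheStack() {
	var stack CacheStack
	stack.Reset(cachekv.NewStore(dbadapter.Store{DB: dbm.NewMemDB()}))

	rev := stack.Snapshot() // push a nested cache layer
	stack.CurrentStore().Set([]byte("k"), []byte("v"))

	stack.RevertToSnapshot(rev) // pop the layer, discarding the buffered write
	stack.Commit()              // write any remaining layers down to the initial store
}
```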


@ -0,0 +1,91 @@
package internal
import (
"bytes"
"errors"
"github.com/tidwall/btree"
"cosmossdk.io/store/types"
)
const (
// The approximate number of items and children per B-tree node. Tuned with benchmarks.
// copied from memdb.
bTreeDegree = 32
)
var errKeyEmpty = errors.New("key cannot be empty")
// BTree implements the sorted cache for the cachekv store.
// We don't use MemDB here because cachekv is used extensively in the SDK's core path
// and needs to be as fast as possible, while `MemDB` is mainly used as a mock db in unit tests.
//
// We choose tidwall/btree over google/btree here because it provides an API to implement a step iterator directly.
type BTree struct {
tree *btree.BTreeG[item]
}
// NewBTree creates a wrapper around `btree.BTreeG`.
func NewBTree() BTree {
return BTree{
tree: btree.NewBTreeGOptions(byKeys, btree.Options{
Degree: bTreeDegree,
NoLocks: false,
}),
}
}
func (bt BTree) Set(key, value []byte) {
bt.tree.Set(newItem(key, value))
}
func (bt BTree) Get(key []byte) []byte {
i, found := bt.tree.Get(newItem(key, nil))
if !found {
return nil
}
return i.value
}
func (bt BTree) Delete(key []byte) {
bt.tree.Delete(newItem(key, nil))
}
func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, errKeyEmpty
}
return newMemIterator(start, end, bt, true), nil
}
func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, errKeyEmpty
}
return newMemIterator(start, end, bt, false), nil
}
// Copy the tree. This is a copy-on-write operation and is very fast because
// it only performs a shadowed copy.
func (bt BTree) Copy() BTree {
return BTree{
tree: bt.tree.Copy(),
}
}
// item is a btree item with byte slices as keys and values
type item struct {
key []byte
value []byte
}
// byKeys compares the items by key
func byKeys(a, b item) bool {
return bytes.Compare(a.key, b.key) == -1
}
// newItem creates a new pair item.
func newItem(key, value []byte) item {
return item{key: key, value: value}
}


@ -0,0 +1,204 @@
package internal
import (
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/types"
)
func TestGetSetDelete(t *testing.T) {
db := NewBTree()
// A nonexistent key should return nil.
value := db.Get([]byte("a"))
require.Nil(t, value)
// Set and get a value.
db.Set([]byte("a"), []byte{0x01})
db.Set([]byte("b"), []byte{0x02})
value = db.Get([]byte("a"))
require.Equal(t, []byte{0x01}, value)
value = db.Get([]byte("b"))
require.Equal(t, []byte{0x02}, value)
// Deleting a non-existent value is fine.
db.Delete([]byte("x"))
// Delete a value.
db.Delete([]byte("a"))
value = db.Get([]byte("a"))
require.Nil(t, value)
db.Delete([]byte("b"))
value = db.Get([]byte("b"))
require.Nil(t, value)
}
func TestDBIterator(t *testing.T) {
db := NewBTree()
for i := 0; i < 10; i++ {
if i != 6 { // but skip 6.
db.Set(int642Bytes(int64(i)), []byte{})
}
}
// Blank iterator keys should error
_, err := db.ReverseIterator([]byte{}, nil)
require.Equal(t, errKeyEmpty, err)
_, err = db.ReverseIterator(nil, []byte{})
require.Equal(t, errKeyEmpty, err)
itr, err := db.Iterator(nil, nil)
require.NoError(t, err)
verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
ritr, err := db.ReverseIterator(nil, nil)
require.NoError(t, err)
verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
itr, err = db.Iterator(nil, int642Bytes(0))
require.NoError(t, err)
verifyIterator(t, itr, []int64(nil), "forward iterator to 0")
ritr, err = db.ReverseIterator(int642Bytes(10), nil)
require.NoError(t, err)
verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)")
itr, err = db.Iterator(int642Bytes(0), nil)
require.NoError(t, err)
verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
itr, err = db.Iterator(int642Bytes(1), nil)
require.NoError(t, err)
verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
ritr, err = db.ReverseIterator(nil, int642Bytes(10))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)")
ritr, err = db.ReverseIterator(nil, int642Bytes(9))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)")
ritr, err = db.ReverseIterator(nil, int642Bytes(8))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)")
itr, err = db.Iterator(int642Bytes(5), int642Bytes(6))
require.NoError(t, err)
verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6")
itr, err = db.Iterator(int642Bytes(5), int642Bytes(7))
require.NoError(t, err)
verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7")
itr, err = db.Iterator(int642Bytes(5), int642Bytes(8))
require.NoError(t, err)
verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8")
itr, err = db.Iterator(int642Bytes(6), int642Bytes(7))
require.NoError(t, err)
verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7")
itr, err = db.Iterator(int642Bytes(6), int642Bytes(8))
require.NoError(t, err)
verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8")
itr, err = db.Iterator(int642Bytes(7), int642Bytes(8))
require.NoError(t, err)
verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8")
ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5))
require.NoError(t, err)
verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4")
ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{5, 4}, "reverse iterator from 6 (ex) to 4")
ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{5, 4}, "reverse iterator from 7 (ex) to 4")
ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6))
require.NoError(t, err)
verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5")
ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7))
require.NoError(t, err)
verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5")
ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64(nil), "reverse iterator from 7 (ex) to 6")
ritr, err = db.ReverseIterator(int642Bytes(10), nil)
require.NoError(t, err)
verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10")
ritr, err = db.ReverseIterator(int642Bytes(6), nil)
require.NoError(t, err)
verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6")
ritr, err = db.ReverseIterator(int642Bytes(5), nil)
require.NoError(t, err)
verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5")
ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9))
require.NoError(t, err)
verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8")
ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64{3, 2}, "reverse iterator from 4 (ex) to 2")
ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2))
require.NoError(t, err)
verifyIterator(t, ritr,
[]int64(nil), "reverse iterator from 2 (ex) to 4")
// Ensure that the iterators don't panic with an empty database.
db2 := NewBTree()
itr, err = db2.Iterator(nil, nil)
require.NoError(t, err)
verifyIterator(t, itr, nil, "forward iterator with empty db")
ritr, err = db2.ReverseIterator(nil, nil)
require.NoError(t, err)
verifyIterator(t, ritr, nil, "reverse iterator with empty db")
}
func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) {
t.Helper()
i := 0
for itr.Valid() {
key := itr.Key()
require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i)
itr.Next()
i++
}
require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter")
require.NoError(t, itr.Close())
}
func int642Bytes(i int64) []byte {
return types.Uint64ToBigEndian(uint64(i))
}
func bytes2Int64(buf []byte) int64 {
return int64(types.BigEndianToUint64(buf))
}


@ -0,0 +1,120 @@
package internal
import (
"bytes"
"errors"
"github.com/tidwall/btree"
"cosmossdk.io/store/types"
)
var _ types.Iterator = (*memIterator)(nil)
// memIterator iterates over iterKVCache items.
// If the value is nil, it means the item was deleted.
// Implements Iterator.
type memIterator struct {
iter btree.IterG[item]
start []byte
end []byte
ascending bool
valid bool
}
func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator {
iter := items.tree.Iter()
var valid bool
if ascending {
if start != nil {
valid = iter.Seek(newItem(start, nil))
} else {
valid = iter.First()
}
} else {
if end != nil {
valid = iter.Seek(newItem(end, nil))
if !valid {
valid = iter.Last()
} else {
// end is exclusive
valid = iter.Prev()
}
} else {
valid = iter.Last()
}
}
mi := &memIterator{
iter: iter,
start: start,
end: end,
ascending: ascending,
valid: valid,
}
if mi.valid {
mi.valid = mi.keyInRange(mi.Key())
}
return mi
}
func (mi *memIterator) Domain() (start, end []byte) {
return mi.start, mi.end
}
func (mi *memIterator) Close() error {
mi.iter.Release()
return nil
}
func (mi *memIterator) Error() error {
if !mi.Valid() {
return errors.New("invalid memIterator")
}
return nil
}
func (mi *memIterator) Valid() bool {
return mi.valid
}
func (mi *memIterator) Next() {
mi.assertValid()
if mi.ascending {
mi.valid = mi.iter.Next()
} else {
mi.valid = mi.iter.Prev()
}
if mi.valid {
mi.valid = mi.keyInRange(mi.Key())
}
}
func (mi *memIterator) keyInRange(key []byte) bool {
if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 {
return false
}
if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 {
return false
}
return true
}
func (mi *memIterator) Key() []byte {
return mi.iter.Item().key
}
func (mi *memIterator) Value() []byte {
return mi.iter.Item().value
}
func (mi *memIterator) assertValid() {
if err := mi.Error(); err != nil {
panic(err)
}
}


@ -0,0 +1,235 @@
package internal
import (
"bytes"
"errors"
"cosmossdk.io/store/types"
)
// cacheMergeIterator merges a parent Iterator and a cache Iterator.
// The cache iterator may return nil values to signal that an item
// had been deleted (but not deleted in the parent).
// If the cache iterator has the same key as the parent, the
// cache shadows (overrides) the parent.
//
// TODO: Optimize by memoizing.
type cacheMergeIterator struct {
parent types.Iterator
cache types.Iterator
ascending bool
valid bool
}
var _ types.Iterator = (*cacheMergeIterator)(nil)
func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) types.Iterator {
iter := &cacheMergeIterator{
parent: parent,
cache: cache,
ascending: ascending,
}
iter.valid = iter.skipUntilExistsOrInvalid()
return iter
}
// Domain implements Iterator.
// Returns parent domain because cache and parent domains are the same.
func (iter *cacheMergeIterator) Domain() (start, end []byte) {
return iter.parent.Domain()
}
// Valid implements Iterator.
func (iter *cacheMergeIterator) Valid() bool {
return iter.valid
}
// Next implements Iterator
func (iter *cacheMergeIterator) Next() {
iter.assertValid()
switch {
case !iter.parent.Valid():
// If parent is invalid, get the next cache item.
iter.cache.Next()
case !iter.cache.Valid():
// If cache is invalid, get the next parent item.
iter.parent.Next()
default:
// Both are valid. Compare keys.
keyP, keyC := iter.parent.Key(), iter.cache.Key()
switch iter.compare(keyP, keyC) {
case -1: // parent < cache
iter.parent.Next()
case 0: // parent == cache
iter.parent.Next()
iter.cache.Next()
case 1: // parent > cache
iter.cache.Next()
}
}
iter.valid = iter.skipUntilExistsOrInvalid()
}
// Key implements Iterator
func (iter *cacheMergeIterator) Key() []byte {
iter.assertValid()
// If parent is invalid, get the cache key.
if !iter.parent.Valid() {
return iter.cache.Key()
}
// If cache is invalid, get the parent key.
if !iter.cache.Valid() {
return iter.parent.Key()
}
// Both are valid. Compare keys.
keyP, keyC := iter.parent.Key(), iter.cache.Key()
cmp := iter.compare(keyP, keyC)
switch cmp {
case -1: // parent < cache
return keyP
case 0: // parent == cache
return keyP
case 1: // parent > cache
return keyC
default:
panic("invalid compare result")
}
}
// Value implements Iterator
func (iter *cacheMergeIterator) Value() []byte {
iter.assertValid()
// If parent is invalid, get the cache value.
if !iter.parent.Valid() {
return iter.cache.Value()
}
// If cache is invalid, get the parent value.
if !iter.cache.Valid() {
return iter.parent.Value()
}
// Both are valid. Compare keys.
keyP, keyC := iter.parent.Key(), iter.cache.Key()
cmp := iter.compare(keyP, keyC)
switch cmp {
case -1: // parent < cache
return iter.parent.Value()
case 0: // parent == cache
return iter.cache.Value()
case 1: // parent > cache
return iter.cache.Value()
default:
panic("invalid comparison result")
}
}
// Close implements Iterator
func (iter *cacheMergeIterator) Close() error {
err1 := iter.cache.Close()
if err := iter.parent.Close(); err != nil {
return err
}
return err1
}
// Error returns an error if the cacheMergeIterator is invalid, as defined by
// the Valid method.
func (iter *cacheMergeIterator) Error() error {
if !iter.Valid() {
return errors.New("invalid cacheMergeIterator")
}
return nil
}
// If not valid, panics.
// NOTE: May have side-effect of iterating over cache.
func (iter *cacheMergeIterator) assertValid() {
if err := iter.Error(); err != nil {
panic(err)
}
}
// Like bytes.Compare but opposite if not ascending.
func (iter *cacheMergeIterator) compare(a, b []byte) int {
if iter.ascending {
return bytes.Compare(a, b)
}
return bytes.Compare(a, b) * -1
}
// Skip all delete-items from the cache w/ `key < until`. After this function,
// current cache item is a non-delete-item, or `until <= key`.
// If the current cache item is not a delete item, does nothing.
// If `until` is nil, there is no limit, and cache may end up invalid.
// CONTRACT: cache is valid.
func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) {
for iter.cache.Valid() &&
iter.cache.Value() == nil &&
(until == nil || iter.compare(iter.cache.Key(), until) < 0) {
iter.cache.Next()
}
}
// Fast forwards cache (or parent+cache in case of deleted items) until current
// item exists, or until iterator becomes invalid.
// Returns whether the iterator is valid.
func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool {
for {
// If parent is invalid, fast-forward cache.
if !iter.parent.Valid() {
iter.skipCacheDeletes(nil)
return iter.cache.Valid()
}
// Parent is valid.
if !iter.cache.Valid() {
return true
}
// Parent is valid, cache is valid.
// Compare parent and cache.
keyP := iter.parent.Key()
keyC := iter.cache.Key()
switch iter.compare(keyP, keyC) {
case -1: // parent < cache.
return true
case 0: // parent == cache.
// Skip over if cache item is a delete.
valueC := iter.cache.Value()
if valueC == nil {
iter.parent.Next()
iter.cache.Next()
continue
}
// Cache is not a delete.
return true // cache exists.
case 1: // cache < parent
// Skip over if cache item is a delete.
valueC := iter.cache.Value()
if valueC == nil {
iter.skipCacheDeletes(keyP)
continue
}
// Cache is not a delete.
return true // cache exists.
}
}
}


@ -0,0 +1,44 @@
package cachekv
import (
"strconv"
"testing"
"cosmossdk.io/store/cachekv/internal"
)
func BenchmarkLargeUnsortedMisses(b *testing.B) {
for i := 0; i < b.N; i++ {
b.StopTimer()
store := generateStore()
b.StartTimer()
for k := 0; k < 10000; k++ {
// cache has A + Z values
// these are within range, but match nothing
store.dirtyItems([]byte("B1"), []byte("B2"))
}
}
}
func generateStore() *Store {
cache := map[string]*cValue{}
unsorted := map[string]struct{}{}
for i := 0; i < 5000; i++ {
key := "A" + strconv.Itoa(i)
unsorted[key] = struct{}{}
cache[key] = &cValue{}
}
for i := 0; i < 5000; i++ {
key := "Z" + strconv.Itoa(i)
unsorted[key] = struct{}{}
cache[key] = &cValue{}
}
return &Store{
cache: cache,
unsortedCache: unsorted,
sortedCache: internal.NewBTree(),
}
}


@ -0,0 +1,141 @@
package cachekv
import "testing"
func TestFindStartIndex(t *testing.T) {
tests := []struct {
name string
sortedL []string
query string
want int
}{
{
name: "non-existent value",
sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "o",
want: 8,
},
{
name: "dupes start at index 0",
sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "a",
want: 0,
},
{
name: "dupes start at non-index 0",
sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "c",
want: 1,
},
{
name: "at end",
sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"},
query: "z",
want: 7,
},
{
name: "dupes at end",
sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"},
query: "z",
want: 7,
},
{
name: "entirely dupes",
sortedL: []string{"z", "z", "z", "z", "z"},
query: "z",
want: 0,
},
{
name: "non-existent but within >=start",
sortedL: []string{"z", "z", "z", "z", "z"},
query: "p",
want: 0,
},
{
name: "non-existent and out of range",
sortedL: []string{"d", "e", "f", "g", "h"},
query: "z",
want: -1,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
body := tt.sortedL
got := findStartIndex(body, tt.query)
if got != tt.want {
t.Fatalf("Got: %d, want: %d", got, tt.want)
}
})
}
}
func TestFindEndIndex(t *testing.T) {
tests := []struct {
name string
sortedL []string
query string
want int
}{
{
name: "non-existent value",
sortedL: []string{"a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "o",
want: 7,
},
{
name: "dupes start at index 0",
sortedL: []string{"a", "a", "a", "b", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "a",
want: 0,
},
{
name: "dupes start at non-index 0",
sortedL: []string{"a", "c", "c", "c", "c", "d", "e", "l", "m", "n", "u", "v", "w", "x", "y", "z"},
query: "c",
want: 1,
},
{
name: "at end",
sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z"},
query: "z",
want: 7,
},
{
name: "dupes at end",
sortedL: []string{"a", "e", "u", "v", "w", "x", "y", "z", "z", "z", "z"},
query: "z",
want: 7,
},
{
name: "entirely dupes",
sortedL: []string{"z", "z", "z", "z", "z"},
query: "z",
want: 0,
},
{
name: "non-existent and out of range",
sortedL: []string{"z", "z", "z", "z", "z"},
query: "p",
want: -1,
},
{
name: "non-existent and out of range",
sortedL: []string{"d", "e", "f", "g", "h"},
query: "z",
want: 4,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
body := tt.sortedL
got := findEndIndex(body, tt.query)
if got != tt.want {
t.Fatalf("Got: %d, want: %d", got, tt.want)
}
})
}
}

408
store/cachekv/store.go Normal file
View File

@ -0,0 +1,408 @@
package cachekv
import (
"bytes"
"io"
"sort"
"sync"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/math"
"cosmossdk.io/store/cachekv/internal"
"cosmossdk.io/store/internal/conv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// cValue represents a cached value.
// If dirty is true, it indicates the cached value is different from the underlying value.
type cValue struct {
value []byte
dirty bool
}
// Store wraps an in-memory cache around an underlying types.KVStore.
type Store struct {
mtx sync.Mutex
cache map[string]*cValue
unsortedCache map[string]struct{}
sortedCache internal.BTree // always ascending sorted
parent types.KVStore
}
var _ types.CacheKVStore = (*Store)(nil)
// NewStore creates a new Store object
func NewStore(parent types.KVStore) *Store {
return &Store{
cache: make(map[string]*cValue),
unsortedCache: make(map[string]struct{}),
sortedCache: internal.NewBTree(),
parent: parent,
}
}
// GetStoreType implements Store.
func (store *Store) GetStoreType() types.StoreType {
return store.parent.GetStoreType()
}
// Get implements types.KVStore.
func (store *Store) Get(key []byte) (value []byte) {
store.mtx.Lock()
defer store.mtx.Unlock()
types.AssertValidKey(key)
cacheValue, ok := store.cache[conv.UnsafeBytesToStr(key)]
if !ok {
value = store.parent.Get(key)
store.setCacheValue(key, value, false)
} else {
value = cacheValue.value
}
return value
}
// Set implements types.KVStore.
func (store *Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
store.mtx.Lock()
defer store.mtx.Unlock()
store.setCacheValue(key, value, true)
}
// Has implements types.KVStore.
func (store *Store) Has(key []byte) bool {
value := store.Get(key)
return value != nil
}
// Delete implements types.KVStore.
func (store *Store) Delete(key []byte) {
types.AssertValidKey(key)
store.mtx.Lock()
defer store.mtx.Unlock()
store.setCacheValue(key, nil, true)
}
func (store *Store) resetCaches() {
if len(store.cache) > 100_000 {
// Cache is too large. We likely did something linear time
// (e.g. Epoch block, Genesis block, etc). Free the old caches from memory, and let them get re-allocated.
// TODO: In a future CacheKV redesign, such linear workloads should get into a different cache instantiation.
// 100_000 is arbitrarily chosen as it solved Osmosis' InitGenesis RAM problem.
store.cache = make(map[string]*cValue)
store.unsortedCache = make(map[string]struct{})
} else {
// Clear the cache using the map clearing idiom
// and not allocating fresh objects.
// Please see https://bencher.orijtech.com/perfclinic/mapclearing/
for key := range store.cache {
delete(store.cache, key)
}
for key := range store.unsortedCache {
delete(store.unsortedCache, key)
}
}
store.sortedCache = internal.NewBTree()
}
// Write implements types.CacheKVStore.
func (store *Store) Write() {
store.mtx.Lock()
defer store.mtx.Unlock()
if len(store.cache) == 0 && len(store.unsortedCache) == 0 {
store.sortedCache = internal.NewBTree()
return
}
type cEntry struct {
key string
val *cValue
}
// We need a copy of all of the keys.
// Not the best. To reduce RAM pressure, we copy the values as well
// and clear out the old caches right after the copy.
sortedCache := make([]cEntry, 0, len(store.cache))
for key, dbValue := range store.cache {
if dbValue.dirty {
sortedCache = append(sortedCache, cEntry{key, dbValue})
}
}
store.resetCaches()
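// Sort the dirty entries so the parent sees writes in a deterministic, ascending-key order.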
sort.Slice(sortedCache, func(i, j int) bool {
return sortedCache[i].key < sortedCache[j].key
})
// TODO: Consider allowing usage of Batch, which would allow the write to
// at least happen atomically.
for _, obj := range sortedCache {
// We use []byte(key) instead of conv.UnsafeStrToBytes because we cannot
// be sure if the underlying store might do a save with the byteslice or
// not. Once we get confirmation that .Delete is guaranteed not to
// save the byteslice, then we can assume only a read-only copy is sufficient.
if obj.val.value != nil {
// It already exists in the parent, hence update it.
store.parent.Set([]byte(obj.key), obj.val.value)
} else {
store.parent.Delete([]byte(obj.key))
}
}
}
// CacheWrap implements CacheWrapper.
func (store *Store) CacheWrap() types.CacheWrap {
return NewStore(store)
}
// CacheWrapWithTrace implements the CacheWrapper interface.
func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return NewStore(tracekv.NewStore(store, w, tc))
}
//----------------------------------------
// Iteration
// Iterator implements types.KVStore.
func (store *Store) Iterator(start, end []byte) types.Iterator {
return store.iterator(start, end, true)
}
// ReverseIterator implements types.KVStore.
func (store *Store) ReverseIterator(start, end []byte) types.Iterator {
return store.iterator(start, end, false)
}
func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator {
store.mtx.Lock()
defer store.mtx.Unlock()
store.dirtyItems(start, end)
isoSortedCache := store.sortedCache.Copy()
var (
err error
parent, cache types.Iterator
)
if ascending {
parent = store.parent.Iterator(start, end)
cache, err = isoSortedCache.Iterator(start, end)
} else {
parent = store.parent.ReverseIterator(start, end)
cache, err = isoSortedCache.ReverseIterator(start, end)
}
if err != nil {
panic(err)
}
return internal.NewCacheMergeIterator(parent, cache, ascending)
}
func findStartIndex(strL []string, startQ string) int {
// Modified binary search to find the very first element >= startQ.
if len(strL) == 0 {
return -1
}
var left, right, mid int
right = len(strL) - 1
for left <= right {
mid = (left + right) >> 1
midStr := strL[mid]
if midStr == startQ {
// Handle the condition where there might be multiple values equal to startQ.
// We are looking for the very last value < midStr, so that i+1 is the first
// element >= midStr (i.e. startQ).
for i := mid - 1; i >= 0; i-- {
if strL[i] != midStr {
return i + 1
}
}
return 0
}
if midStr < startQ {
left = mid + 1
} else { // midStr > startQ
right = mid - 1
}
}
if left >= 0 && left < len(strL) && strL[left] >= startQ {
return left
}
return -1
}
func findEndIndex(strL []string, endQ string) int {
if len(strL) == 0 {
return -1
}
// Modified binary search to find the first occurrence of endQ or, if absent, the last element < endQ.
var left, right, mid int
right = len(strL) - 1
for left <= right {
mid = (left + right) >> 1
midStr := strL[mid]
if midStr == endQ {
// Handle the condition where there might be multiple values equal to endQ.
// We are looking for the very last value < midStr, so that i+1 is the first
// element equal to midStr (i.e. endQ).
for i := mid - 1; i >= 0; i-- {
if strL[i] < midStr {
return i + 1
}
}
return 0
}
if midStr < endQ {
left = mid + 1
} else { // midStr > endQ
right = mid - 1
}
}
// Binary search failed, now let's find a value less than endQ.
for i := right; i >= 0; i-- {
if strL[i] < endQ {
return i
}
}
return -1
}
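// Worked example (consistent with the tests earlier in this commit): with
// strL = []string{"a", "c", "c", "e"},
// findStartIndex(strL, "c") == 1 (the first element >= "c") and
// findEndIndex(strL, "c") == 1 (the first occurrence of "c"), while
// findStartIndex(strL, "d") == 3 and findEndIndex(strL, "d") == 2,
// i.e. for an absent query the start index rounds up and the end index rounds down.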
type sortState int
const (
stateUnsorted sortState = iota
stateAlreadySorted
)
const minSortSize = 1024
// Constructs a slice of dirty items, to use w/ memIterator.
func (store *Store) dirtyItems(start, end []byte) {
startStr, endStr := conv.UnsafeBytesToStr(start), conv.UnsafeBytesToStr(end)
if end != nil && startStr > endStr {
// Nothing to do here.
return
}
n := len(store.unsortedCache)
unsorted := make([]*kv.Pair, 0)
// If the unsortedCache is too big, it costs too much to determine
// what's in the subset we are concerned about.
// If you are interleaving iterator calls with writes, this can easily become an
// O(N^2) overhead.
// Even without that, too many range checks eventually become more expensive
// than just not having the cache.
if n < minSortSize {
for key := range store.unsortedCache {
// dbm.IsKeyInDomain is nil-safe and returns true iff the key lies within the domain [start, end)
if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) {
cacheValue := store.cache[key]
unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
}
}
store.clearUnsortedCacheSubset(unsorted, stateUnsorted)
return
}
// Otherwise it is large so perform a modified binary search to find
// the target ranges for the keys that we should be looking for.
strL := make([]string, 0, n)
for key := range store.unsortedCache {
strL = append(strL, key)
}
sort.Strings(strL)
// Now find the values within the domain
// [start, end)
startIndex := findStartIndex(strL, startStr)
if startIndex < 0 {
startIndex = 0
}
var endIndex int
if end == nil {
endIndex = len(strL) - 1
} else {
endIndex = findEndIndex(strL, endStr)
}
if endIndex < 0 {
endIndex = len(strL) - 1
}
// Since we spent cycles to sort the values, we should process and remove a reasonable amount.
// Ensure start to end is at least minSortSize in size;
// if below minSortSize, expand it to cover additional values.
// This amortizes the cost of processing elements across multiple calls.
if endIndex-startIndex < minSortSize {
endIndex = math.Min(startIndex+minSortSize, len(strL)-1)
if endIndex-startIndex < minSortSize {
startIndex = math.Max(endIndex-minSortSize, 0)
}
}
kvL := make([]*kv.Pair, 0, 1+endIndex-startIndex)
for i := startIndex; i <= endIndex; i++ {
key := strL[i]
cacheValue := store.cache[key]
kvL = append(kvL, &kv.Pair{Key: []byte(key), Value: cacheValue.value})
}
// kvL was already sorted so pass it in as is.
store.clearUnsortedCacheSubset(kvL, stateAlreadySorted)
}
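// Worked example of the window expansion above: with minSortSize = 1024 and,
// say, n = 5000 sorted keys, a query whose matches span only indices
// [2000, 2010) yields endIndex-startIndex < 1024, so endIndex is pushed out to
// startIndex+1024 = 3024. The ~1000 extra keys are moved into sortedCache now,
// so later iterator calls do not pay to sort them again.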
func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sortState) {
n := len(store.unsortedCache)
if len(unsorted) == n { // This pattern allows the Go compiler to emit the map clearing idiom for the entire map.
for key := range store.unsortedCache {
delete(store.unsortedCache, key)
}
} else { // Otherwise, normally delete the unsorted keys from the map.
for _, kv := range unsorted {
delete(store.unsortedCache, conv.UnsafeBytesToStr(kv.Key))
}
}
if sortState == stateUnsorted {
sort.Slice(unsorted, func(i, j int) bool {
return bytes.Compare(unsorted[i].Key, unsorted[j].Key) < 0
})
}
for _, item := range unsorted {
// sortedCache is able to store `nil` value to represent deleted items.
store.sortedCache.Set(item.Key, item.Value)
}
}
//----------------------------------------
// etc
// Only entrypoint to mutate store.cache.
// A `nil` value means a deletion.
func (store *Store) setCacheValue(key, value []byte, dirty bool) {
keyStr := conv.UnsafeBytesToStr(key)
store.cache[keyStr] = &cValue{
value: value,
dirty: dirty,
}
if dirty {
store.unsortedCache[keyStr] = struct{}{}
}
}

View File

@ -0,0 +1,153 @@
package cachekv_test
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
)
var sink interface{}
const defaultValueSizeBz = 1 << 12
// This benchmark measures the time of iterator.Next() when the parent store is blank
func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) {
b.Helper()
mem := dbadapter.Store{DB: dbm.NewMemDB()}
kvstore := cachekv.NewStore(mem)
// Use a singleton for value, to not waste time computing it
value := randSlice(defaultValueSizeBz)
// Use simple values for keys, pick a random start,
// and take the next b.N keys sequentially after.
startKey := randSlice(32)
// Add 1 to avoid issues when b.N = 1
keys := generateSequentialKeys(startKey, b.N+1)
for _, k := range keys {
kvstore.Set(k, value)
}
b.ReportAllocs()
b.ResetTimer()
iter := kvstore.Iterator(keys[0], keys[b.N])
defer iter.Close()
for ; iter.Valid(); iter.Next() {
_ = iter.Key()
// deadcode elimination stub
sink = iter
}
}
// Benchmark setting new keys to a store, where the new keys are in sequence.
func benchmarkBlankParentAppend(b *testing.B, keysize int) {
b.Helper()
mem := dbadapter.Store{DB: dbm.NewMemDB()}
kvstore := cachekv.NewStore(mem)
// Use a singleton for value, to not waste time computing it
value := randSlice(32)
// Use simple values for keys, pick a random start,
// and take next b.N keys sequentially after.
startKey := randSlice(32)
keys := generateSequentialKeys(startKey, b.N)
b.ReportAllocs()
b.ResetTimer()
for _, k := range keys {
kvstore.Set(k, value)
}
}
// Benchmark setting new keys to a store, where the new keys are random.
// the speed of this function does not depend on the values in the parent store
func benchmarkRandomSet(b *testing.B, keysize int) {
b.Helper()
mem := dbadapter.Store{DB: dbm.NewMemDB()}
kvstore := cachekv.NewStore(mem)
// Use a singleton for value, to not waste time computing it
value := randSlice(defaultValueSizeBz)
// Add 1 to avoid issues when b.N = 1
keys := generateRandomKeys(keysize, b.N+1)
b.ReportAllocs()
b.ResetTimer()
for _, k := range keys {
kvstore.Set(k, value)
}
iter := kvstore.Iterator(keys[0], keys[b.N])
defer iter.Close()
for ; iter.Valid(); iter.Next() {
_ = iter.Key()
// deadcode elimination stub
sink = iter
}
}
// Benchmark creating an iterator on a parent with D entries,
// that are all deleted in the cacheKV store.
// We essentially are benchmarking the cacheKV iterator creation & iteration times
// with the number of entries deleted in the parent.
func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) {
b.Helper()
mem := dbadapter.Store{DB: dbm.NewMemDB()}
// Use a singleton for value, to not waste time computing it
value := randSlice(32)
// Use simple values for keys, pick a random start,
// and take next D keys sequentially after.
startKey := randSlice(32)
// Add 1 to avoid issues when numDeletes = 1
keys := generateSequentialKeys(startKey, numDeletes+1)
// setup parent db with D keys.
for _, k := range keys {
mem.Set(k, value)
}
kvstore := cachekv.NewStore(mem)
// Delete all keys from the cache KV store.
// keys[1:] keeps at least one entry in the parent, due to a bug in the SDK iterator design:
// with everything deleted the iterator should simply never be valid and never run.
// However, that is incompatible with the for-loop structure the SDK uses and hence
// causes a panic. Thus we do keys[1:].
for _, k := range keys[1:] {
kvstore.Delete(k)
}
b.ReportAllocs()
b.ResetTimer()
iter := kvstore.Iterator(keys[0], keys[numDeletes])
defer iter.Close()
for ; iter.Valid(); iter.Next() {
_ = iter.Key()
// deadcode elimination stub
sink = iter
}
}
func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) {
benchmarkBlankParentIteratorNext(b, 32)
}
func BenchmarkBlankParentAppendKeySize32(b *testing.B) {
benchmarkBlankParentAppend(b, 32)
}
func BenchmarkSetKeySize32(b *testing.B) {
benchmarkRandomSet(b, 32)
}
func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) {
benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000)
}

694
store/cachekv/store_test.go Normal file
View File

@ -0,0 +1,694 @@
package cachekv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/math/unsafe"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/types"
)
func newCacheKVStore() types.CacheKVStore {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
return cachekv.NewStore(mem)
}
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
func TestCacheKVStore(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
st := cachekv.NewStore(mem)
require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
// put something in mem and in cache
mem.Set(keyFmt(1), valFmt(1))
st.Set(keyFmt(1), valFmt(1))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
// update it in cache; shouldn't change mem
st.Set(keyFmt(1), valFmt(2))
require.Equal(t, valFmt(2), st.Get(keyFmt(1)))
require.Equal(t, valFmt(1), mem.Get(keyFmt(1)))
// write it. should change mem
st.Write()
require.Equal(t, valFmt(2), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(2), st.Get(keyFmt(1)))
// more writes and checks
st.Write()
st.Write()
require.Equal(t, valFmt(2), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(2), st.Get(keyFmt(1)))
// make a new one, check it
st = cachekv.NewStore(mem)
require.Equal(t, valFmt(2), st.Get(keyFmt(1)))
// make a new one and delete - should not be removed from mem
st = cachekv.NewStore(mem)
st.Delete(keyFmt(1))
require.Empty(t, st.Get(keyFmt(1)))
require.Equal(t, mem.Get(keyFmt(1)), valFmt(2))
// Write. should now be removed from both
st.Write()
require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty")
}
func TestCacheKVStoreNoNilSet(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
st := cachekv.NewStore(mem)
require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic")
require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
}
func TestCacheKVStoreNested(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
st := cachekv.NewStore(mem)
// set. check it's there on st and not on mem.
st.Set(keyFmt(1), valFmt(1))
require.Empty(t, mem.Get(keyFmt(1)))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
// make a new one from st and check
st2 := cachekv.NewStore(st)
require.Equal(t, valFmt(1), st2.Get(keyFmt(1)))
// update the value on st2, check it only affects st2
st2.Set(keyFmt(1), valFmt(3))
require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
require.Equal(t, valFmt(3), st2.Get(keyFmt(1)))
// st2 writes to its parent, st. doesn't affect mem
st2.Write()
require.Equal(t, []byte(nil), mem.Get(keyFmt(1)))
require.Equal(t, valFmt(3), st.Get(keyFmt(1)))
// updates mem
st.Write()
require.Equal(t, valFmt(3), mem.Get(keyFmt(1)))
}
func TestCacheKVIteratorBounds(t *testing.T) {
st := newCacheKVStore()
// set some items
nItems := 5
for i := 0; i < nItems; i++ {
st.Set(keyFmt(i), valFmt(i))
}
// iterate over all of them
itr := st.Iterator(nil, nil)
i := 0
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(i), k)
require.Equal(t, valFmt(i), v)
i++
}
require.Equal(t, nItems, i)
require.NoError(t, itr.Close())
// iterate over none
itr = st.Iterator(bz("money"), nil)
i = 0
for ; itr.Valid(); itr.Next() {
i++
}
require.Equal(t, 0, i)
require.NoError(t, itr.Close())
// iterate over lower
itr = st.Iterator(keyFmt(0), keyFmt(3))
i = 0
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(i), k)
require.Equal(t, valFmt(i), v)
i++
}
require.Equal(t, 3, i)
require.NoError(t, itr.Close())
// iterate over upper
itr = st.Iterator(keyFmt(2), keyFmt(4))
i = 2
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(i), k)
require.Equal(t, valFmt(i), v)
i++
}
require.Equal(t, 4, i)
require.NoError(t, itr.Close())
}
func TestCacheKVReverseIteratorBounds(t *testing.T) {
st := newCacheKVStore()
// set some items
nItems := 5
for i := 0; i < nItems; i++ {
st.Set(keyFmt(i), valFmt(i))
}
// iterate over all of them
itr := st.ReverseIterator(nil, nil)
i := 0
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(nItems-1-i), k)
require.Equal(t, valFmt(nItems-1-i), v)
i++
}
require.Equal(t, nItems, i)
require.NoError(t, itr.Close())
// iterate over none
itr = st.ReverseIterator(bz("money"), nil)
i = 0
for ; itr.Valid(); itr.Next() {
i++
}
require.Equal(t, 0, i)
require.NoError(t, itr.Close())
// iterate over lower
end := 3
itr = st.ReverseIterator(keyFmt(0), keyFmt(end))
i = 0
for ; itr.Valid(); itr.Next() {
i++
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(end-i), k)
require.Equal(t, valFmt(end-i), v)
}
require.Equal(t, 3, i)
require.NoError(t, itr.Close())
// iterate over upper
end = 4
itr = st.ReverseIterator(keyFmt(2), keyFmt(end))
i = 0
for ; itr.Valid(); itr.Next() {
i++
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(end-i), k)
require.Equal(t, valFmt(end-i), v)
}
require.Equal(t, 2, i)
require.NoError(t, itr.Close())
}
func TestCacheKVMergeIteratorBasics(t *testing.T) {
st := newCacheKVStore()
// set and delete an item in the cache, iterator should be empty
k, v := keyFmt(0), valFmt(0)
st.Set(k, v)
st.Delete(k)
assertIterateDomain(t, st, 0)
// now set it and assert it's there
st.Set(k, v)
assertIterateDomain(t, st, 1)
// write it and assert it's there
st.Write()
assertIterateDomain(t, st, 1)
// remove it in cache and assert it's not
st.Delete(k)
assertIterateDomain(t, st, 0)
// write the delete and assert it's not there
st.Write()
assertIterateDomain(t, st, 0)
// add two keys and assert they're there
k1, v1 := keyFmt(1), valFmt(1)
st.Set(k, v)
st.Set(k1, v1)
assertIterateDomain(t, st, 2)
// write it and assert they're there
st.Write()
assertIterateDomain(t, st, 2)
// remove one in cache and assert it's not
st.Delete(k1)
assertIterateDomain(t, st, 1)
// write the delete and assert it's not there
st.Write()
assertIterateDomain(t, st, 1)
// delete the other key in the cache and assert it's empty
st.Delete(k)
assertIterateDomain(t, st, 0)
}
func TestCacheKVMergeIteratorDeleteLast(t *testing.T) {
st := newCacheKVStore()
// set some items and write them
nItems := 5
for i := 0; i < nItems; i++ {
st.Set(keyFmt(i), valFmt(i))
}
st.Write()
// set some more items and leave dirty
for i := nItems; i < nItems*2; i++ {
st.Set(keyFmt(i), valFmt(i))
}
// iterate over all of them
assertIterateDomain(t, st, nItems*2)
// delete them all
for i := 0; i < nItems*2; i++ {
last := nItems*2 - 1 - i
st.Delete(keyFmt(last))
assertIterateDomain(t, st, last)
}
}
func TestCacheKVMergeIteratorDeletes(t *testing.T) {
st := newCacheKVStore()
truth := dbm.NewMemDB()
// set some items and write them
nItems := 10
for i := 0; i < nItems; i++ {
doOp(t, st, truth, opSet, i)
}
st.Write()
// delete every other item, starting from 0
for i := 0; i < nItems; i += 2 {
doOp(t, st, truth, opDel, i)
assertIterateDomainCompare(t, st, truth)
}
// reset
st = newCacheKVStore()
truth = dbm.NewMemDB()
// set some items and write them
for i := 0; i < nItems; i++ {
doOp(t, st, truth, opSet, i)
}
st.Write()
// delete every other item, starting from 1
for i := 1; i < nItems; i += 2 {
doOp(t, st, truth, opDel, i)
assertIterateDomainCompare(t, st, truth)
}
}
func TestCacheKVMergeIteratorChunks(t *testing.T) {
st := newCacheKVStore()
// Use the truth to check values on the merge iterator
truth := dbm.NewMemDB()
// sets to the parent
setRange(t, st, truth, 0, 20)
setRange(t, st, truth, 40, 60)
st.Write()
// sets to the cache
setRange(t, st, truth, 20, 40)
setRange(t, st, truth, 60, 80)
assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}})
// remove some parents and some cache
deleteRange(t, st, truth, 15, 25)
assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}})
// remove some parents and some cache
deleteRange(t, st, truth, 35, 45)
assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {45, 80}})
// write, add more to the cache, and delete some cache
st.Write()
setRange(t, st, truth, 38, 42)
deleteRange(t, st, truth, 40, 43)
assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}})
}
func TestCacheKVMergeIteratorDomain(t *testing.T) {
st := newCacheKVStore()
itr := st.Iterator(nil, nil)
start, end := itr.Domain()
require.Equal(t, start, end)
require.NoError(t, itr.Close())
itr = st.Iterator(keyFmt(40), keyFmt(60))
start, end = itr.Domain()
require.Equal(t, keyFmt(40), start)
require.Equal(t, keyFmt(60), end)
require.NoError(t, itr.Close())
start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain()
require.Equal(t, keyFmt(0), start)
require.Equal(t, keyFmt(80), end)
}
func TestCacheKVMergeIteratorRandom(t *testing.T) {
st := newCacheKVStore()
truth := dbm.NewMemDB()
start, end := 25, 975
max := 1000
setRange(t, st, truth, start, end)
// do an op, test the iterator
for i := 0; i < 2000; i++ {
doRandomOp(t, st, truth, max)
assertIterateDomainCompare(t, st, truth)
}
}
func TestNilEndIterator(t *testing.T) {
const SIZE = 3000
tests := []struct {
name string
write bool
startIndex int
end []byte
}{
{name: "write=false, end=nil", write: false, end: nil, startIndex: 1000},
{name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000},
{name: "write=true, end=nil", write: true, end: nil, startIndex: 1000},
{name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
st := newCacheKVStore()
for i := 0; i < SIZE; i++ {
kstr := keyFmt(i)
st.Set(kstr, valFmt(i))
}
if tt.write {
st.Write()
}
itr := st.Iterator(keyFmt(tt.startIndex), tt.end)
i := tt.startIndex
j := 0
for itr.Valid() {
require.Equal(t, keyFmt(i), itr.Key())
require.Equal(t, valFmt(i), itr.Value())
itr.Next()
i++
j++
}
require.Equal(t, SIZE-tt.startIndex, j)
require.NoError(t, itr.Close())
})
}
}
// TestIteratorDeadlock demonstrates the deadlock issue in the cache store.
func TestIteratorDeadlock(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
store := cachekv.NewStore(mem)
// the iterator channel buffer is 64 and is received from once, so put at least 66 elements.
for i := 0; i < 66; i++ {
store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1})
}
it := store.Iterator(nil, nil)
defer it.Close()
store.Set([]byte("key20"), []byte{1})
// with the previous version this would block here, unless the lock on the btree is enabled.
it2 := store.Iterator(nil, nil)
defer it2.Close()
}
//-------------------------------------------------------------------------------------------
// do some random ops
const (
opSet = 0
opSetRange = 1
opDel = 2
opDelRange = 3
opWrite = 4
totalOps = 5 // number of possible operations
)
func randInt(n int) int {
return unsafe.NewRand().Int() % n
}
// useful for replaying an error case if we find one
func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) {
t.Helper()
switch op {
case opSet:
k := args[0]
st.Set(keyFmt(k), valFmt(k))
err := truth.Set(keyFmt(k), valFmt(k))
require.NoError(t, err)
case opSetRange:
start := args[0]
end := args[1]
setRange(t, st, truth, start, end)
case opDel:
k := args[0]
st.Delete(keyFmt(k))
err := truth.Delete(keyFmt(k))
require.NoError(t, err)
case opDelRange:
start := args[0]
end := args[1]
deleteRange(t, st, truth, start, end)
case opWrite:
st.Write()
}
}
func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) {
t.Helper()
r := randInt(totalOps)
switch r {
case opSet:
k := randInt(maxKey)
st.Set(keyFmt(k), valFmt(k))
err := truth.Set(keyFmt(k), valFmt(k))
require.NoError(t, err)
case opSetRange:
start := randInt(maxKey - 2)
end := randInt(maxKey-start) + start
setRange(t, st, truth, start, end)
case opDel:
k := randInt(maxKey)
st.Delete(keyFmt(k))
err := truth.Delete(keyFmt(k))
require.NoError(t, err)
case opDelRange:
start := randInt(maxKey - 2)
end := randInt(maxKey-start) + start
deleteRange(t, st, truth, start, end)
case opWrite:
st.Write()
}
}
//-------------------------------------------------------------------------------------------
// iterate over whole domain
func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) {
t.Helper()
itr := st.Iterator(nil, nil)
i := 0
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
require.Equal(t, keyFmt(i), k)
require.Equal(t, valFmt(i), v)
i++
}
require.Equal(t, expectedN, i)
require.NoError(t, itr.Close())
}
func assertIterateDomainCheck(t *testing.T, st types.KVStore, mem dbm.DB, r []keyRange) {
t.Helper()
// iterate over each and check they match the other
itr := st.Iterator(nil, nil)
itr2, err := mem.Iterator(nil, nil) // ground truth
require.NoError(t, err)
krc := newKeyRangeCounter(r)
i := 0
for ; krc.valid(); krc.next() {
require.True(t, itr.Valid())
require.True(t, itr2.Valid())
// check the key/val matches the ground truth
k, v := itr.Key(), itr.Value()
k2, v2 := itr2.Key(), itr2.Value()
require.Equal(t, k, k2)
require.Equal(t, v, v2)
// check they match the counter
require.Equal(t, k, keyFmt(krc.key()))
itr.Next()
itr2.Next()
i++
}
require.False(t, itr.Valid())
require.False(t, itr2.Valid())
require.NoError(t, itr.Close())
require.NoError(t, itr2.Close())
}
func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) {
t.Helper()
// iterate over each and check they match the other
itr := st.Iterator(nil, nil)
itr2, err := mem.Iterator(nil, nil) // ground truth
require.NoError(t, err)
checkIterators(t, itr, itr2)
checkIterators(t, itr2, itr)
require.NoError(t, itr.Close())
require.NoError(t, itr2.Close())
}
func checkIterators(t *testing.T, itr, itr2 types.Iterator) {
t.Helper()
for ; itr.Valid(); itr.Next() {
require.True(t, itr2.Valid())
k, v := itr.Key(), itr.Value()
k2, v2 := itr2.Key(), itr2.Value()
require.Equal(t, k, k2)
require.Equal(t, v, v2)
itr2.Next()
}
require.False(t, itr.Valid())
require.False(t, itr2.Valid())
}
//--------------------------------------------------------
func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) {
t.Helper()
for i := start; i < end; i++ {
st.Set(keyFmt(i), valFmt(i))
err := mem.Set(keyFmt(i), valFmt(i))
require.NoError(t, err)
}
}
func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) {
t.Helper()
for i := start; i < end; i++ {
st.Delete(keyFmt(i))
err := mem.Delete(keyFmt(i))
require.NoError(t, err)
}
}
//--------------------------------------------------------
type keyRange struct {
start int
end int
}
func (kr keyRange) len() int {
return kr.end - kr.start
}
func newKeyRangeCounter(kr []keyRange) *keyRangeCounter {
return &keyRangeCounter{keyRanges: kr}
}
// we can iterate over this and make sure our real iterators have all the right keys
type keyRangeCounter struct {
rangeIdx int
idx int
keyRanges []keyRange
}
func (krc *keyRangeCounter) valid() bool {
maxRangeIdx := len(krc.keyRanges) - 1
maxRange := krc.keyRanges[maxRangeIdx]
// if we're not in the max range, we're valid
if krc.rangeIdx <= maxRangeIdx &&
krc.idx < maxRange.len() {
return true
}
return false
}
func (krc *keyRangeCounter) next() {
thisKeyRange := krc.keyRanges[krc.rangeIdx]
if krc.idx == thisKeyRange.len()-1 {
krc.rangeIdx++
krc.idx = 0
} else {
krc.idx++
}
}
func (krc *keyRangeCounter) key() int {
thisKeyRange := krc.keyRanges[krc.rangeIdx]
return thisKeyRange.start + krc.idx
}
//--------------------------------------------------------
func bz(s string) []byte { return []byte(s) }
func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) {
b.ReportAllocs()
st := newCacheKVStore()
b.ResetTimer()
// assumes b.N < 2**24
for i := 0; i < b.N; i++ {
st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)})
}
}
func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) {
b.ReportAllocs()
st := newCacheKVStore()
for i := 0; i < b.N; i++ {
arr := []byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}
st.Set(arr, arr)
}
b.ResetTimer()
// assumes b.N < 2**24
for i := 0; i < b.N; i++ {
st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)})
}
}

170
store/cachemulti/store.go Normal file
View File

@ -0,0 +1,170 @@
package cachemulti
import (
"fmt"
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// storeNameCtxKey is the TraceContext metadata key that identifies
// the store which emitted a given trace.
const storeNameCtxKey = "store_name"
//----------------------------------------
// Store
// Store holds many branched stores.
// Implements MultiStore.
// NOTE: a Store (and MultiStores in general) should never expose the
// keys for the substores.
type Store struct {
db types.CacheKVStore
stores map[types.StoreKey]types.CacheWrap
keys map[string]types.StoreKey
traceWriter io.Writer
traceContext types.TraceContext
}
var _ types.CacheMultiStore = Store{}
// NewFromKVStore creates a new Store object from a mapping of store keys to
// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store
// is a branched store.
func NewFromKVStore(
store types.KVStore, stores map[types.StoreKey]types.CacheWrapper,
keys map[string]types.StoreKey, traceWriter io.Writer, traceContext types.TraceContext,
) Store {
cms := Store{
db: cachekv.NewStore(store),
stores: make(map[types.StoreKey]types.CacheWrap, len(stores)),
keys: keys,
traceWriter: traceWriter,
traceContext: traceContext,
}
for key, store := range stores {
if cms.TracingEnabled() {
tctx := cms.traceContext.Clone().Merge(types.TraceContext{
storeNameCtxKey: key.Name(),
})
store = tracekv.NewStore(store.(types.KVStore), cms.traceWriter, tctx)
}
cms.stores[key] = cachekv.NewStore(store.(types.KVStore))
}
return cms
}
// NewStore creates a new Store object from a mapping of store keys to
// CacheWrapper objects. Each CacheWrapper store is a branched store.
func NewStore(
db dbm.DB, stores map[types.StoreKey]types.CacheWrapper, keys map[string]types.StoreKey,
traceWriter io.Writer, traceContext types.TraceContext,
) Store {
return NewFromKVStore(dbadapter.Store{DB: db}, stores, keys, traceWriter, traceContext)
}
func newCacheMultiStoreFromCMS(cms Store) Store {
stores := make(map[types.StoreKey]types.CacheWrapper)
for k, v := range cms.stores {
stores[k] = v
}
return NewFromKVStore(cms.db, stores, nil, cms.traceWriter, cms.traceContext)
}
// SetTracer sets the tracer for the MultiStore that the underlying
// stores will utilize to trace operations. A MultiStore is returned.
func (cms Store) SetTracer(w io.Writer) types.MultiStore {
cms.traceWriter = w
return cms
}
// SetTracingContext updates the tracing context for the MultiStore by merging
// the given context with the existing context by key. Any existing keys will
// be overwritten. It is implied that the caller should update the context when
// necessary between tracing operations. It returns a modified MultiStore.
func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore {
if cms.traceContext != nil {
for k, v := range tc {
cms.traceContext[k] = v
}
} else {
cms.traceContext = tc
}
return cms
}
// TracingEnabled returns if tracing is enabled for the MultiStore.
func (cms Store) TracingEnabled() bool {
return cms.traceWriter != nil
}
// LatestVersion panics, as the latest version cannot be read from a branched cached multi-store.
func (cms Store) LatestVersion() int64 {
panic("cannot get latest version from branch cached multi-store")
}
// GetStoreType returns the type of the store.
func (cms Store) GetStoreType() types.StoreType {
return types.StoreTypeMulti
}
// Write calls Write on each underlying store.
func (cms Store) Write() {
cms.db.Write()
for _, store := range cms.stores {
store.Write()
}
}
// Implements CacheWrapper.
func (cms Store) CacheWrap() types.CacheWrap {
return cms.CacheMultiStore().(types.CacheWrap)
}
// CacheWrapWithTrace implements the CacheWrapper interface.
func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
return cms.CacheWrap()
}
// Implements MultiStore.
func (cms Store) CacheMultiStore() types.CacheMultiStore {
return newCacheMultiStoreFromCMS(cms)
}
// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic
// as an already cached multi-store cannot load previous versions.
//
// TODO: The store implementation can possibly be modified to support this as it
// seems safe to load previous versions (heights).
func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) {
panic("cannot branch cached multi-store with a version")
}
// GetStore returns an underlying Store by key.
func (cms Store) GetStore(key types.StoreKey) types.Store {
s := cms.stores[key]
if key == nil || s == nil {
panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
}
return s.(types.Store)
}
// GetKVStore returns an underlying KVStore by key.
func (cms Store) GetKVStore(key types.StoreKey) types.KVStore {
store := cms.stores[key]
if key == nil || store == nil {
panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key))
}
return store.(types.KVStore)
}
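A minimal usage sketch (assuming the dbadapter and types packages from this commit) showing that substores are branched and nothing reaches a parent until Write:

package main

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/cachemulti"
	"cosmossdk.io/store/dbadapter"
	"cosmossdk.io/store/types"
)

func main() {
	key := types.NewKVStoreKey("bank")
	parent := dbadapter.Store{DB: dbm.NewMemDB()}
	stores := map[types.StoreKey]types.CacheWrapper{key: parent}

	// No tracing: nil writer and nil trace context.
	cms := cachemulti.NewStore(dbm.NewMemDB(), stores, nil, nil, nil)
	cms.GetKVStore(key).Set([]byte("k"), []byte("v"))

	println(parent.Has([]byte("k"))) // false: still buffered in the branch
	cms.Write()                      // flush every branched substore
	println(parent.Has([]byte("k"))) // true
}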

View File

@ -0,0 +1,24 @@
package cachemulti
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/types"
)
func TestStoreGetKVStore(t *testing.T) {
require := require.New(t)
s := Store{stores: map[types.StoreKey]types.CacheWrap{}}
key := types.NewKVStoreKey("abc")
errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key)
require.PanicsWithValue(errMsg,
func() { s.GetStore(key) })
require.PanicsWithValue(errMsg,
func() { s.GetKVStore(key) })
}

90
store/dbadapter/store.go Normal file
View File

@ -0,0 +1,90 @@
package dbadapter
import (
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
// Store is a wrapper type for dbm.DB with an implementation of KVStore
type Store struct {
dbm.DB
}
// Get wraps the underlying DB's Get method, panicking on error.
func (dsa Store) Get(key []byte) []byte {
v, err := dsa.DB.Get(key)
if err != nil {
panic(err)
}
return v
}
// Has wraps the underlying DB's Has method, panicking on error.
func (dsa Store) Has(key []byte) bool {
ok, err := dsa.DB.Has(key)
if err != nil {
panic(err)
}
return ok
}
// Set wraps the underlying DB's Set method, panicking on error.
func (dsa Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
if err := dsa.DB.Set(key, value); err != nil {
panic(err)
}
}
// Delete wraps the underlying DB's Delete method, panicking on error.
func (dsa Store) Delete(key []byte) {
if err := dsa.DB.Delete(key); err != nil {
panic(err)
}
}
// Iterator wraps the underlying DB's Iterator method, panicking on error.
func (dsa Store) Iterator(start, end []byte) types.Iterator {
iter, err := dsa.DB.Iterator(start, end)
if err != nil {
panic(err)
}
return iter
}
// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking on error.
func (dsa Store) ReverseIterator(start, end []byte) types.Iterator {
iter, err := dsa.DB.ReverseIterator(start, end)
if err != nil {
panic(err)
}
return iter
}
// GetStoreType returns the type of the store.
func (Store) GetStoreType() types.StoreType {
return types.StoreTypeDB
}
// CacheWrap branches the underlying store.
func (dsa Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(dsa)
}
// CacheWrapWithTrace implements KVStore.
func (dsa Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(dsa, w, tc))
}
// dbm.DB implements KVStore so we can CacheKVStore it.
var _ types.KVStore = Store{}
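For reference, a minimal sketch of the adapter in use, mirroring the dbadapter.Store{DB: dbm.NewMemDB()} pattern the tests in this commit rely on:

package main

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/store/dbadapter"
)

func main() {
	mem := dbadapter.Store{DB: dbm.NewMemDB()} // a dbm.DB usable as a types.KVStore
	mem.Set([]byte("k"), []byte("v"))          // panics on DB error instead of returning it
	println(string(mem.Get([]byte("k"))))      // prints: v
	_ = mem.CacheWrap()                        // and it can be branched like any KVStore
}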

View File

@ -0,0 +1,86 @@
package dbadapter_test
import (
"bytes"
"errors"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/mock"
"cosmossdk.io/store/types"
)
var errFoo = errors.New("dummy")
func TestAccessors(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
mockDB := mock.NewMockDB(mockCtrl)
store := dbadapter.Store{mockDB}
key := []byte("test")
value := []byte("testvalue")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Equal(t, types.StoreTypeDB, store.GetStoreType())
store.GetStoreType()
retFoo := []byte("xxx")
mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(retFoo, nil)
require.True(t, bytes.Equal(retFoo, store.Get(key)))
mockDB.EXPECT().Get(gomock.Eq(key)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.Get(key) })
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(true, nil)
require.True(t, store.Has(key))
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, nil)
require.False(t, store.Has(key))
mockDB.EXPECT().Has(gomock.Eq(key)).Times(1).Return(false, errFoo)
require.Panics(t, func() { store.Has(key) })
mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(nil)
require.NotPanics(t, func() { store.Set(key, value) })
mockDB.EXPECT().Set(gomock.Eq(key), gomock.Eq(value)).Times(1).Return(errFoo)
require.Panics(t, func() { store.Set(key, value) })
mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(nil)
require.NotPanics(t, func() { store.Delete(key) })
mockDB.EXPECT().Delete(gomock.Eq(key)).Times(1).Return(errFoo)
require.Panics(t, func() { store.Delete(key) })
start, end := []byte("start"), []byte("end")
mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil)
require.NotPanics(t, func() { store.Iterator(start, end) })
mockDB.EXPECT().Iterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.Iterator(start, end) })
mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, nil)
require.NotPanics(t, func() { store.ReverseIterator(start, end) })
mockDB.EXPECT().ReverseIterator(gomock.Eq(start), gomock.Eq(end)).Times(1).Return(nil, errFoo)
require.Panics(t, func() { store.ReverseIterator(start, end) })
}
func TestCacheWraps(t *testing.T) {
mockCtrl := gomock.NewController(t)
mockDB := mock.NewMockDB(mockCtrl)
store := dbadapter.Store{mockDB}
cacheWrapper := store.CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}

176
store/gaskv/store.go Normal file
View File

@ -0,0 +1,176 @@
package gaskv
import (
"io"
"cosmossdk.io/store/types"
)
var _ types.KVStore = &Store{}
// Store applies gas tracking to an underlying KVStore. It implements the
// KVStore interface.
type Store struct {
gasMeter types.GasMeter
gasConfig types.GasConfig
parent types.KVStore
}
// NewStore returns a reference to a new GasKVStore.
func NewStore(parent types.KVStore, gasMeter types.GasMeter, gasConfig types.GasConfig) *Store {
kvs := &Store{
gasMeter: gasMeter,
gasConfig: gasConfig,
parent: parent,
}
return kvs
}
// Implements Store.
func (gs *Store) GetStoreType() types.StoreType {
return gs.parent.GetStoreType()
}
// Implements KVStore.
func (gs *Store) Get(key []byte) (value []byte) {
gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostFlat, types.GasReadCostFlatDesc)
value = gs.parent.Get(key)
// TODO overflow-safe math?
gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasReadPerByteDesc)
gs.gasMeter.ConsumeGas(gs.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasReadPerByteDesc)
return value
}
// Implements KVStore.
func (gs *Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostFlat, types.GasWriteCostFlatDesc)
// TODO overflow-safe math?
gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(key)), types.GasWritePerByteDesc)
gs.gasMeter.ConsumeGas(gs.gasConfig.WriteCostPerByte*types.Gas(len(value)), types.GasWritePerByteDesc)
gs.parent.Set(key, value)
}
// Implements KVStore.
func (gs *Store) Has(key []byte) bool {
gs.gasMeter.ConsumeGas(gs.gasConfig.HasCost, types.GasHasDesc)
return gs.parent.Has(key)
}
// Implements KVStore.
func (gs *Store) Delete(key []byte) {
// charge gas to prevent certain attack vectors even though space is being freed
gs.gasMeter.ConsumeGas(gs.gasConfig.DeleteCost, types.GasDeleteDesc)
gs.parent.Delete(key)
}
// Iterator implements the KVStore interface. It returns an iterator which
// incurs a flat gas cost for seeking to the first key/value pair and a variable
// gas cost based on the current key and value lengths if the iterator is valid.
func (gs *Store) Iterator(start, end []byte) types.Iterator {
return gs.iterator(start, end, true)
}
// ReverseIterator implements the KVStore interface. It returns a reverse
// iterator which incurs a flat gas cost for seeking to the first key/value pair
// and a variable gas cost based on the current key and value lengths if the
// iterator is valid.
func (gs *Store) ReverseIterator(start, end []byte) types.Iterator {
return gs.iterator(start, end, false)
}
// Implements KVStore.
func (gs *Store) CacheWrap() types.CacheWrap {
panic("cannot CacheWrap a GasKVStore")
}
// CacheWrapWithTrace implements the KVStore interface.
func (gs *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
panic("cannot CacheWrapWithTrace a GasKVStore")
}
func (gs *Store) iterator(start, end []byte, ascending bool) types.Iterator {
var parent types.Iterator
if ascending {
parent = gs.parent.Iterator(start, end)
} else {
parent = gs.parent.ReverseIterator(start, end)
}
gi := newGasIterator(gs.gasMeter, gs.gasConfig, parent)
gi.(*gasIterator).consumeSeekGas()
return gi
}
type gasIterator struct {
gasMeter types.GasMeter
gasConfig types.GasConfig
parent types.Iterator
}
func newGasIterator(gasMeter types.GasMeter, gasConfig types.GasConfig, parent types.Iterator) types.Iterator {
return &gasIterator{
gasMeter: gasMeter,
gasConfig: gasConfig,
parent: parent,
}
}
// Implements Iterator.
func (gi *gasIterator) Domain() (start, end []byte) {
return gi.parent.Domain()
}
// Implements Iterator.
func (gi *gasIterator) Valid() bool {
return gi.parent.Valid()
}
// Next implements the Iterator interface. It seeks to the next key/value pair
// in the iterator. It incurs a flat gas cost for seeking and a variable gas
// cost based on the current key and value lengths if the iterator is valid.
func (gi *gasIterator) Next() {
gi.consumeSeekGas()
gi.parent.Next()
}
// Key implements the Iterator interface. It returns the current key and it does
// not incur any gas cost.
func (gi *gasIterator) Key() (key []byte) {
key = gi.parent.Key()
return key
}
// Value implements the Iterator interface. It returns the current value and it
// does not incur any gas cost.
func (gi *gasIterator) Value() (value []byte) {
value = gi.parent.Value()
return value
}
// Implements Iterator.
func (gi *gasIterator) Close() error {
return gi.parent.Close()
}
// Error delegates the Error call to the parent iterator.
func (gi *gasIterator) Error() error {
return gi.parent.Error()
}
// consumeSeekGas consumes, on each seek, a flat gas cost plus a variable gas cost
// based on the current key and value lengths.
func (gi *gasIterator) consumeSeekGas() {
if gi.Valid() {
key := gi.Key()
value := gi.Value()
gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(key)), types.GasValuePerByteDesc)
gi.gasMeter.ConsumeGas(gi.gasConfig.ReadCostPerByte*types.Gas(len(value)), types.GasValuePerByteDesc)
}
gi.gasMeter.ConsumeGas(gi.gasConfig.IterNextCostFlat, types.GasIterNextCostFlatDesc)
}
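A worked example of this accounting, assuming the default KVGasConfig values (ReadCostFlat 1000, ReadCostPerByte 3, WriteCostFlat 2000, WriteCostPerByte 30, DeleteCost 1000) and the 11-byte key / 13-byte value used by TestGasKVStoreBasic below:

	Get (miss): 1000 + 3*11          = 1033
	Set:        2000 + 30*11 + 30*13 = 2720
	Get (hit):  1000 + 3*11 + 3*13   = 1072
	Delete:                            1000
	Get (miss): 1000 + 3*11          = 1033
	                           total = 6858

which matches the types.Gas(6858) asserted in the test.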

120
store/gaskv/store_test.go Normal file
View File

@ -0,0 +1,120 @@
package gaskv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/gaskv"
"cosmossdk.io/store/types"
)
func bz(s string) []byte { return []byte(s) }
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
func TestGasKVStoreBasic(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
meter := types.NewGasMeter(10000)
st := gaskv.NewStore(mem, meter, types.KVGasConfig())
require.Equal(t, types.StoreTypeDB, st.GetStoreType())
require.Panics(t, func() { st.CacheWrap() })
require.Panics(t, func() { st.CacheWrapWithTrace(nil, nil) })
require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
st.Set(keyFmt(1), valFmt(1))
require.Equal(t, valFmt(1), st.Get(keyFmt(1)))
st.Delete(keyFmt(1))
require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
require.Equal(t, meter.GasConsumed(), types.Gas(6858))
}
func TestGasKVStoreIterator(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
meter := types.NewGasMeter(100000)
st := gaskv.NewStore(mem, meter, types.KVGasConfig())
require.False(t, st.Has(keyFmt(1)))
require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty")
require.Empty(t, st.Get(keyFmt(2)), "Expected `key2` to be empty")
require.Empty(t, st.Get(keyFmt(3)), "Expected `key3` to be empty")
st.Set(keyFmt(1), valFmt(1))
require.True(t, st.Has(keyFmt(1)))
st.Set(keyFmt(2), valFmt(2))
require.True(t, st.Has(keyFmt(2)))
st.Set(keyFmt(3), valFmt(0))
iterator := st.Iterator(nil, nil)
start, end := iterator.Domain()
require.Nil(t, start)
require.Nil(t, end)
require.NoError(t, iterator.Error())
t.Cleanup(func() {
if err := iterator.Close(); err != nil {
t.Fatal(err)
}
})
ka := iterator.Key()
require.Equal(t, ka, keyFmt(1))
va := iterator.Value()
require.Equal(t, va, valFmt(1))
iterator.Next()
kb := iterator.Key()
require.Equal(t, kb, keyFmt(2))
vb := iterator.Value()
require.Equal(t, vb, valFmt(2))
iterator.Next()
require.Equal(t, types.Gas(14565), meter.GasConsumed())
kc := iterator.Key()
require.Equal(t, kc, keyFmt(3))
vc := iterator.Value()
require.Equal(t, vc, valFmt(0))
iterator.Next()
require.Equal(t, types.Gas(14667), meter.GasConsumed())
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.Equal(t, types.Gas(14697), meter.GasConsumed())
require.NoError(t, iterator.Error())
reverseIterator := st.ReverseIterator(nil, nil)
t.Cleanup(func() {
if err := reverseIterator.Close(); err != nil {
t.Fatal(err)
}
})
require.Equal(t, reverseIterator.Key(), keyFmt(3))
reverseIterator.Next()
require.Equal(t, reverseIterator.Key(), keyFmt(2))
reverseIterator.Next()
require.Equal(t, reverseIterator.Key(), keyFmt(1))
reverseIterator.Next()
require.False(t, reverseIterator.Valid())
require.Panics(t, reverseIterator.Next)
require.Equal(t, types.Gas(15135), meter.GasConsumed())
}
func TestGasKVStoreOutOfGasSet(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
meter := types.NewGasMeter(0)
st := gaskv.NewStore(mem, meter, types.KVGasConfig())
require.Panics(t, func() { st.Set(keyFmt(1), valFmt(1)) }, "Expected out-of-gas")
}
func TestGasKVStoreOutOfGasIterator(t *testing.T) {
mem := dbadapter.Store{DB: dbm.NewMemDB()}
meter := types.NewGasMeter(20000)
st := gaskv.NewStore(mem, meter, types.KVGasConfig())
st.Set(keyFmt(1), valFmt(1))
iterator := st.Iterator(nil, nil)
iterator.Next()
require.Panics(t, func() { iterator.Value() }, "Expected out-of-gas")
}

View File

@ -1,72 +1,78 @@
module cosmossdk.io/store/v2
module cosmossdk.io/store
go 1.21
require (
cosmossdk.io/core v0.12.0
cosmossdk.io/errors v1.0.1
cosmossdk.io/errors v1.0.0
cosmossdk.io/log v1.3.1
github.com/cockroachdb/errors v1.11.1
github.com/cockroachdb/pebble v1.1.0
cosmossdk.io/math v1.3.0
github.com/cometbft/cometbft v0.38.7
github.com/cosmos/cosmos-db v1.0.2
github.com/cosmos/gogoproto v1.4.12
github.com/cosmos/iavl v1.1.1
github.com/cosmos/iavl v1.1.2
github.com/cosmos/ics23/go v0.10.0
github.com/google/btree v1.1.2
github.com/hashicorp/go-metrics v0.5.3
github.com/linxGnu/grocksdb v1.8.14
github.com/mattn/go-sqlite3 v1.14.22
github.com/spf13/cast v1.6.0
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.4 // indirect
github.com/hashicorp/go-hclog v1.5.0
github.com/hashicorp/go-metrics v0.5.1
github.com/hashicorp/go-plugin v1.5.2
github.com/hashicorp/golang-lru v1.0.2
github.com/spf13/cast v1.6.0 // indirect
github.com/stretchr/testify v1.9.0
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d
golang.org/x/sync v0.7.0
github.com/tidwall/btree v1.7.0
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0
google.golang.org/grpc v1.61.1
google.golang.org/protobuf v1.33.0
gotest.tools/v3 v3.5.1
)
require (
github.com/DataDog/zstd v1.5.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
github.com/cockroachdb/pebble v1.1.0 // indirect
github.com/cockroachdb/redact v1.1.5 // indirect
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect
github.com/cosmos/cosmos-db v1.0.2 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/emicklei/dot v1.6.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/getsentry/sentry-go v0.27.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/hashicorp/go-immutable-radix v1.0.0 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
github.com/jhump/protoreflect v1.15.3 // indirect
github.com/klauspost/compress v1.17.7 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/linxGnu/grocksdb v1.8.14 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/petermattis/goid v0.0.0-20221215004737-a150e88a970d // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.53.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.52.2 // indirect
github.com/prometheus/procfs v0.13.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
github.com/rs/zerolog v1.32.0 // indirect
github.com/sasha-s/go-deadlock v0.3.1 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
golang.org/x/crypto v0.22.0 // indirect
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f // indirect
golang.org/x/net v0.24.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/sys v0.19.0 // indirect
golang.org/x/text v0.14.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
google.golang.org/grpc v1.63.2 // indirect
google.golang.org/protobuf v1.34.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
replace cosmossdk.io/core => ../core

View File

@ -1,7 +1,9 @@
cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0=
cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U=
cosmossdk.io/errors v1.0.0 h1:nxF07lmlBbB8NKQhtJ+sJm6ef5uV1XkvPXG2bUntb04=
cosmossdk.io/errors v1.0.0/go.mod h1:+hJZLuhdDE0pYN8HkOrVNwrIOYvUGnn6+4fjnJs/oV0=
cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI=
cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM=
cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE=
cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
@ -19,9 +21,11 @@ github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipus
github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/bufbuild/protocompile v0.6.0 h1:Uu7WiSQ6Yj9DbkdnOe7U4mNKp58y9WDMKDn28/ZlunY=
github.com/bufbuild/protocompile v0.6.0/go.mod h1:YNP35qEYoYGme7QMtz5SBCoN4kL4g12jTtjuzRNdjpE=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@ -46,8 +50,8 @@ github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAK
github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA=
github.com/cosmos/gogoproto v1.4.12 h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8JpFE=
github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY=
github.com/cosmos/iavl v1.1.1 h1:64nTi8s3gEoGqhA8TyAWFWfz7/pg0anKzHNSc1ETc7Q=
github.com/cosmos/iavl v1.1.1/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM=
github.com/cosmos/iavl v1.1.2 h1:zL9FK7C4L/P4IF1Dm5fIwz0WXCnn7Bp1M2FxH0ayM7Y=
github.com/cosmos/iavl v1.1.2/go.mod h1:jLeUvm6bGT1YutCaL2fIar/8vGUE8cPZvh/gXEWDaDM=
github.com/cosmos/ics23/go v0.10.0 h1:iXqLLgp2Lp+EdpIuwXTYIQU+AiHj9mOC2X9ab++bZDM=
github.com/cosmos/ics23/go v0.10.0/go.mod h1:ZfJSmng/TBNTBkFemHHHj5YY7VAU/MBU980F4VU1NG0=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@ -57,10 +61,13 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y=
github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnNEcHYvcCuK6dPZSg=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
github.com/emicklei/dot v1.6.1 h1:ujpDlBkkwgWUY+qPId5IwapRW/xEoligRSYjioR6DFI=
github.com/emicklei/dot v1.6.1/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -110,10 +117,14 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-metrics v0.5.3 h1:M5uADWMOGCTUNU1YuC4hfknOeHNaX54LDm4oYSucoNE=
github.com/hashicorp/go-metrics v0.5.3/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE=
github.com/hashicorp/go-metrics v0.5.1 h1:rfPwUqFU6uZXNvGl4hzjY8LEBsqFVU4si1H9/Hqck/U=
github.com/hashicorp/go-metrics v0.5.1/go.mod h1:KEjodfebIOuBYSAe/bHTm+HChmKSxAOXPBieMLYozDE=
github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y=
github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
@ -121,8 +132,12 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jhump/protoreflect v1.15.3 h1:6SFRuqU45u9hIZPJAoZ8c28T3nK64BNdp9w6jFonzls=
github.com/jhump/protoreflect v1.15.3/go.mod h1:4ORHmSBmlCW8fh3xHmJMGyul1zNqZK4Elxc8qKP+p1k=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
@ -141,15 +156,19 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/linxGnu/grocksdb v1.8.14 h1:HTgyYalNwBSG/1qCQUIott44wU5b2Y9Kr3z7SK5OfGQ=
github.com/linxGnu/grocksdb v1.8.14/go.mod h1:QYiYypR2d4v63Wj1adOOfzglnoII0gLj3PNh4fZkcFA=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@ -160,6 +179,8 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:FatpGJD2jmJfhZiFDElaC0QhZUDQnxUeAwTGkfAHN3I=
github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
@ -195,12 +216,12 @@ github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdU
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck=
github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@ -229,19 +250,23 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs=
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48=
github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI=
github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f h1:3CW0unweImhOzd5FmYuRsD4Y4oQFKZIjAnKbjV4WIrw=
golang.org/x/exp v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0 h1:985EYyeCOxTpcgOTJpflJUwOeEz0CQOdPt73OzpE9F8=
golang.org/x/exp v0.0.0-20240404231335-c0f41cb1a7a0/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -250,17 +275,19 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -273,15 +300,22 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -301,15 +335,16 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY=
google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -318,8 +353,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4=
google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -336,6 +371,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

store/iavl/store.go Normal file
View File

@ -0,0 +1,417 @@
package iavl
import (
"errors"
"fmt"
"io"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
ics23 "github.com/cosmos/ics23/go"
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/metrics"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
"cosmossdk.io/store/wrapper"
)
const (
DefaultIAVLCacheSize = 500000
)
var (
_ types.KVStore = (*Store)(nil)
_ types.CommitStore = (*Store)(nil)
_ types.CommitKVStore = (*Store)(nil)
_ types.Queryable = (*Store)(nil)
_ types.StoreWithInitialVersion = (*Store)(nil)
)
// Store implements types.KVStore and CommitKVStore.
type Store struct {
tree Tree
logger log.Logger
metrics metrics.StoreMetrics
}
// LoadStore returns an IAVL Store as a CommitKVStore. Internally, it will load the
// store's version (id) from the provided DB. An error is returned if the version
// fails to load, or if called with a positive version on an empty tree.
func LoadStore(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
return LoadStoreWithInitialVersion(db, logger, key, id, 0, cacheSize, disableFastNode, metrics)
}
// LoadStoreWithInitialVersion returns an IAVL Store as a CommitKVStore setting its initialVersion
// to the one given. Internally, it will load the store's version (id) from the
// provided DB. An error is returned if the version fails to load, or if called with a positive
// version on an empty tree.
func LoadStoreWithInitialVersion(db dbm.DB, logger log.Logger, key types.StoreKey, id types.CommitID, initialVersion uint64, cacheSize int, disableFastNode bool, metrics metrics.StoreMetrics) (types.CommitKVStore, error) {
tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, disableFastNode, logger, iavl.InitialVersionOption(initialVersion))
isUpgradeable, err := tree.IsUpgradeable()
if err != nil {
return nil, err
}
if isUpgradeable && logger != nil {
logger.Info(
"Upgrading IAVL storage for faster queries + execution on live state. This may take a while",
"store_key", key.String(),
"version", initialVersion,
"commit", fmt.Sprintf("%X", id),
)
}
_, err = tree.LoadVersion(id.Version)
if err != nil {
return nil, err
}
if logger != nil {
logger.Debug("Finished loading IAVL tree")
}
return &Store{
tree: tree,
logger: logger,
metrics: metrics,
}, nil
}
// UnsafeNewStore returns a reference to a new IAVL Store with a given mutable
// IAVL tree reference. It should only be used for testing purposes.
//
// CONTRACT: The IAVL tree should be fully loaded.
// CONTRACT: PruningOptions passed in as argument must be the same as pruning options
// passed into iavl.MutableTree
func UnsafeNewStore(tree *iavl.MutableTree) *Store {
return &Store{
tree: tree,
metrics: metrics.NewNoOpMetrics(),
}
}
// GetImmutable returns a reference to a new store backed by an immutable IAVL
// tree at a specific version (height) without any pruning options. This should
// be used for querying and iteration only. If the version does not exist or has
// been pruned, an empty immutable IAVL tree will be used.
// Any mutable operations executed will result in a panic.
func (st *Store) GetImmutable(version int64) (*Store, error) {
if !st.VersionExists(version) {
return nil, errors.New("version mismatch on immutable IAVL tree; version does not exist. Version has either been pruned, or is for a future block height")
}
iTree, err := st.tree.GetImmutable(version)
if err != nil {
return nil, err
}
return &Store{
tree: &immutableTree{iTree},
metrics: st.metrics,
}, nil
}
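// Illustrative sketch (an assumption, not part of the store API surface):
// typical read-only use of GetImmutable for historical queries. The version
// number 5 below is hypothetical.
//
//	hist, err := st.GetImmutable(5) // read-only view of committed version 5
//	if err != nil {
//		// version 5 has been pruned or was never committed
//	}
//	_ = hist.Get([]byte("key")) // reads work; Set, Delete and Commit panic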
// Commit commits the current store state and returns a CommitID with the new
// version and hash.
func (st *Store) Commit() types.CommitID {
defer st.metrics.MeasureSince("store", "iavl", "commit")
hash, version, err := st.tree.SaveVersion()
if err != nil {
panic(err)
}
return types.CommitID{
Version: version,
Hash: hash,
}
}
// WorkingHash returns the hash of the current working tree.
func (st *Store) WorkingHash() []byte {
return st.tree.WorkingHash()
}
// LastCommitID implements Committer.
func (st *Store) LastCommitID() types.CommitID {
return types.CommitID{
Version: st.tree.Version(),
Hash: st.tree.Hash(),
}
}
// SetPruning panics as pruning options should be provided at initialization
// since IAVL accepts pruning options directly.
func (st *Store) SetPruning(_ pruningtypes.PruningOptions) {
	panic("cannot set pruning options on an initialized IAVL store")
}
// GetPruning panics as pruning options should be provided at initialization
// since IAVL accepts pruning options directly.
func (st *Store) GetPruning() pruningtypes.PruningOptions {
	panic("cannot get pruning options on an initialized IAVL store")
}
// VersionExists returns whether or not a given version is stored.
func (st *Store) VersionExists(version int64) bool {
return st.tree.VersionExists(version)
}
// GetAllVersions returns all versions in the iavl tree
func (st *Store) GetAllVersions() []int {
return st.tree.AvailableVersions()
}
// Implements Store.
func (st *Store) GetStoreType() types.StoreType {
return types.StoreTypeIAVL
}
// Implements Store.
func (st *Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(st)
}
// CacheWrapWithTrace implements the Store interface.
func (st *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(st, w, tc))
}
// Implements types.KVStore.
func (st *Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
_, err := st.tree.Set(key, value)
if err != nil && st.logger != nil {
st.logger.Error("iavl set error", "error", err.Error())
}
}
// Implements types.KVStore.
func (st *Store) Get(key []byte) []byte {
defer st.metrics.MeasureSince("store", "iavl", "get")
value, err := st.tree.Get(key)
if err != nil {
panic(err)
}
return value
}
// Implements types.KVStore.
func (st *Store) Has(key []byte) (exists bool) {
defer st.metrics.MeasureSince("store", "iavl", "has")
has, err := st.tree.Has(key)
if err != nil {
panic(err)
}
return has
}
// Implements types.KVStore.
func (st *Store) Delete(key []byte) {
defer st.metrics.MeasureSince("store", "iavl", "delete")
_, _, err := st.tree.Remove(key)
if err != nil {
panic(err)
}
}
// DeleteVersionsTo deletes versions up to the given version from the MutableTree. An error
// is returned if any single version is invalid or the delete fails. All writes
// happen in a single batch with a single commit.
func (st *Store) DeleteVersionsTo(version int64) error {
return st.tree.DeleteVersionsTo(version)
}
// LoadVersionForOverwriting attempts to load a tree at a previously committed
// version. Any versions greater than targetVersion will be deleted.
func (st *Store) LoadVersionForOverwriting(targetVersion int64) error {
return st.tree.LoadVersionForOverwriting(targetVersion)
}
// Implements types.KVStore.
func (st *Store) Iterator(start, end []byte) types.Iterator {
iterator, err := st.tree.Iterator(start, end, true)
if err != nil {
panic(err)
}
return iterator
}
// Implements types.KVStore.
func (st *Store) ReverseIterator(start, end []byte) types.Iterator {
iterator, err := st.tree.Iterator(start, end, false)
if err != nil {
panic(err)
}
return iterator
}
// SetInitialVersion sets the initial version of the IAVL tree. It is used when
// starting a new chain at an arbitrary height.
func (st *Store) SetInitialVersion(version int64) {
st.tree.SetInitialVersion(uint64(version))
}
// Export exports the IAVL store at the given version, returning an iavl.Exporter for the tree.
func (st *Store) Export(version int64) (*iavl.Exporter, error) {
istore, err := st.GetImmutable(version)
if err != nil {
return nil, errorsmod.Wrapf(err, "iavl export failed for version %v", version)
}
tree, ok := istore.tree.(*immutableTree)
if !ok || tree == nil {
return nil, fmt.Errorf("iavl export failed: unable to fetch tree for version %v", version)
}
return tree.Export()
}
// Import imports an IAVL tree at the given version, returning an iavl.Importer for importing.
func (st *Store) Import(version int64) (*iavl.Importer, error) {
tree, ok := st.tree.(*iavl.MutableTree)
if !ok {
return nil, errors.New("iavl import failed: unable to find mutable tree")
}
return tree.Import(version)
}
// getHeight returns the height to query at. If the requested height is 0, it
// falls back to the latest height, preferring latest-1 when that version
// exists so that Merkle proofs are immediately available.
func getHeight(tree Tree, req *types.RequestQuery) int64 {
height := req.Height
if height == 0 {
latest := tree.Version()
if tree.VersionExists(latest - 1) {
height = latest - 1
} else {
height = latest
}
}
return height
}
// Query implements the ABCI Query interface. It allows queries on the store.
//
// By default we return data from (latest height - 1), since Merkle proofs for
// it are immediately available (header height = data height + 1). If latest-1
// is not present, the latest height is used (which must be present). If you
// need the latest data, e.g. to see the result of a tx, you must explicitly
// set the height you want to query.
func (st *Store) Query(req *types.RequestQuery) (res *types.ResponseQuery, err error) {
defer st.metrics.MeasureSince("store", "iavl", "query")
if len(req.Data) == 0 {
return &types.ResponseQuery{}, errorsmod.Wrap(types.ErrTxDecode, "query cannot be zero length")
}
tree := st.tree
// store the height we chose in the response, with 0 being changed to the
// latest height
res = &types.ResponseQuery{
Height: getHeight(tree, req),
}
switch req.Path {
case "/key": // get by key
key := req.Data // data holds the key bytes
res.Key = key
if !st.VersionExists(res.Height) {
res.Log = iavl.ErrVersionDoesNotExist.Error()
break
}
value, err := tree.GetVersioned(key, res.Height)
if err != nil {
panic(err)
}
res.Value = value
if !req.Prove {
break
}
// Continue to prove existence/absence of value
// Must convert store.Tree to iavl.MutableTree with given version to use in CreateProof
iTree, err := tree.GetImmutable(res.Height)
if err != nil {
// sanity check: If value for given version was retrieved, immutable tree must also be retrievable
panic(fmt.Sprintf("version exists in store but could not retrieve corresponding versioned tree in store, %s", err.Error()))
}
mtree := &iavl.MutableTree{
ImmutableTree: iTree,
}
// get proof from tree and convert to merkle.Proof before adding to result
res.ProofOps = getProofFromTree(mtree, req.Data, res.Value != nil)
case "/subspace":
pairs := kv.Pairs{
Pairs: make([]kv.Pair, 0),
}
subspace := req.Data
res.Key = subspace
iterator := types.KVStorePrefixIterator(st, subspace)
for ; iterator.Valid(); iterator.Next() {
pairs.Pairs = append(pairs.Pairs, kv.Pair{Key: iterator.Key(), Value: iterator.Value()})
}
if err := iterator.Close(); err != nil {
panic(fmt.Errorf("failed to close iterator: %w", err))
}
bz, err := pairs.Marshal()
if err != nil {
panic(fmt.Errorf("failed to marshal KV pairs: %w", err))
}
res.Value = bz
default:
return &types.ResponseQuery{}, errorsmod.Wrapf(types.ErrUnknownRequest, "unexpected query path: %v", req.Path)
}
return res, err
}
// TraverseStateChanges traverses the state changes between two versions and calls the given function.
func (st *Store) TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error {
return st.tree.TraverseStateChanges(startVersion, endVersion, fn)
}
// getProofFromTree takes a MutableTree, a key, and a flag for creating an
// existence or absence proof, and returns the appropriate merkle.Proof. Since
// this must be called after querying for the value, it should never error;
// thus, it panics on error rather than returning it.
func getProofFromTree(tree *iavl.MutableTree, key []byte, exists bool) *cmtprotocrypto.ProofOps {
var (
commitmentProof *ics23.CommitmentProof
err error
)
if exists {
// value was found
commitmentProof, err = tree.GetMembershipProof(key)
if err != nil {
// sanity check: If value was found, membership proof must be creatable
panic(fmt.Sprintf("unexpected value for empty proof: %s", err.Error()))
}
} else {
// value wasn't found
commitmentProof, err = tree.GetNonMembershipProof(key)
if err != nil {
// sanity check: If value wasn't found, nonmembership proof must be creatable
panic(fmt.Sprintf("unexpected error for nonexistence proof: %s", err.Error()))
}
}
op := types.NewIavlCommitmentOp(key, commitmentProof)
return &cmtprotocrypto.ProofOps{Ops: []cmtprotocrypto.ProofOp{op.ProofOp()}}
}
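For orientation, a minimal, self-contained sketch of how the store defined above is typically used. The in-memory backend and the store key name "test" are illustrative assumptions, not part of this file:

package main

import (
	"fmt"

	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/iavl"
	"cosmossdk.io/store/metrics"
	"cosmossdk.io/store/types"
)

func main() {
	db := dbm.NewMemDB() // illustrative in-memory backend
	// An empty CommitID loads a fresh tree at version 0.
	st, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"),
		types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
	if err != nil {
		panic(err)
	}
	st.Set([]byte("hello"), []byte("world"))
	cid := st.Commit() // persists version 1

	// Query at an explicit height; Height 0 would default to latest-1.
	res, err := st.(types.Queryable).Query(&types.RequestQuery{
		Path: "/key", Data: []byte("hello"), Height: cid.Version,
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("version %d: %s\n", cid.Version, res.Value)
}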

store/iavl/store_test.go Normal file
View File

@ -0,0 +1,714 @@
package iavl
import (
"bytes"
crand "crypto/rand"
"fmt"
"math"
"sort"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/types"
"cosmossdk.io/store/wrapper"
)
var (
cacheSize = 100
treeData = map[string]string{
"hello": "goodbye",
"aloha": "shalom",
}
nMoreData = 0
)
func randBytes(numBytes int) []byte {
b := make([]byte, numBytes)
_, _ = crand.Read(b)
return b
}
// make a tree with data from above and save it
func newAlohaTree(t *testing.T, db dbm.DB) (*iavl.MutableTree, types.CommitID) {
t.Helper()
tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger())
for k, v := range treeData {
_, err := tree.Set([]byte(k), []byte(v))
require.NoError(t, err)
}
for i := 0; i < nMoreData; i++ {
key := randBytes(12)
value := randBytes(50)
_, err := tree.Set(key, value)
require.NoError(t, err)
}
hash, ver, err := tree.SaveVersion()
require.Nil(t, err)
return tree, types.CommitID{Version: ver, Hash: hash}
}
func TestLoadStore(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
store := UnsafeNewStore(tree)
// Create non-pruned height H
updated, err := tree.Set([]byte("hello"), []byte("hallo"))
require.NoError(t, err)
require.True(t, updated)
hash, verH, err := tree.SaveVersion()
cIDH := types.CommitID{Version: verH, Hash: hash}
require.Nil(t, err)
// Create pruned height Hp
updated, err = tree.Set([]byte("hello"), []byte("hola"))
require.NoError(t, err)
require.True(t, updated)
hash, verHp, err := tree.SaveVersion()
cIDHp := types.CommitID{Version: verHp, Hash: hash}
require.Nil(t, err)
// TODO: Prune this height
// Create current height Hc
updated, err = tree.Set([]byte("hello"), []byte("ciao"))
require.NoError(t, err)
require.True(t, updated)
hash, verHc, err := tree.SaveVersion()
cIDHc := types.CommitID{Version: verHc, Hash: hash}
require.Nil(t, err)
// Querying an existing store at some previous non-pruned height H
hStore, err := store.GetImmutable(verH)
require.NoError(t, err)
require.Equal(t, string(hStore.Get([]byte("hello"))), "hallo")
// Querying an existing store at some previous pruned height Hp
hpStore, err := store.GetImmutable(verHp)
require.NoError(t, err)
require.Equal(t, string(hpStore.Get([]byte("hello"))), "hola")
// Querying an existing store at current height Hc
hcStore, err := store.GetImmutable(verHc)
require.NoError(t, err)
require.Equal(t, string(hcStore.Get([]byte("hello"))), "ciao")
// Querying a new store at some previous non-pruned height H
newHStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDH, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
require.NoError(t, err)
require.Equal(t, string(newHStore.Get([]byte("hello"))), "hallo")
// Querying a new store at some previous pruned height Hp
newHpStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHp, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
require.NoError(t, err)
require.Equal(t, string(newHpStore.Get([]byte("hello"))), "hola")
// Querying a new store at current height H
newHcStore, err := LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), cIDHc, DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
require.NoError(t, err)
require.Equal(t, string(newHcStore.Get([]byte("hello"))), "ciao")
}
func TestGetImmutable(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
store := UnsafeNewStore(tree)
updated, err := tree.Set([]byte("hello"), []byte("adios"))
require.NoError(t, err)
require.True(t, updated)
hash, ver, err := tree.SaveVersion()
cID := types.CommitID{Version: ver, Hash: hash}
require.Nil(t, err)
_, err = store.GetImmutable(cID.Version + 1)
require.Error(t, err)
newStore, err := store.GetImmutable(cID.Version - 1)
require.NoError(t, err)
require.Equal(t, newStore.Get([]byte("hello")), []byte("goodbye"))
newStore, err = store.GetImmutable(cID.Version)
require.NoError(t, err)
require.Equal(t, newStore.Get([]byte("hello")), []byte("adios"))
res, err := newStore.Query(&types.RequestQuery{Data: []byte("hello"), Height: cID.Version, Path: "/key", Prove: true})
require.NoError(t, err)
require.Equal(t, res.Value, []byte("adios"))
require.NotNil(t, res.ProofOps)
require.Panics(t, func() { newStore.Set(nil, nil) })
require.Panics(t, func() { newStore.Delete(nil) })
require.Panics(t, func() { newStore.Commit() })
}
func TestGetImmutableIterator(t *testing.T) {
db := dbm.NewMemDB()
tree, cID := newAlohaTree(t, db)
store := UnsafeNewStore(tree)
newStore, err := store.GetImmutable(cID.Version)
require.NoError(t, err)
iter := newStore.Iterator([]byte("aloha"), []byte("hellz"))
expected := []string{"aloha", "hello"}
var i int
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
}
func TestIAVLStoreGetSetHasDelete(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree)
key := "hello"
exists := iavlStore.Has([]byte(key))
require.True(t, exists)
value := iavlStore.Get([]byte(key))
require.EqualValues(t, value, treeData[key])
value2 := "notgoodbye"
iavlStore.Set([]byte(key), []byte(value2))
value = iavlStore.Get([]byte(key))
require.EqualValues(t, value, value2)
iavlStore.Delete([]byte(key))
exists = iavlStore.Has([]byte(key))
require.False(t, exists)
}
func TestIAVLStoreNoNilSet(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree)
require.Panics(t, func() { iavlStore.Set(nil, []byte("value")) }, "setting a nil key should panic")
require.Panics(t, func() { iavlStore.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Panics(t, func() { iavlStore.Set([]byte("key"), nil) }, "setting a nil value should panic")
}
func TestIAVLIterator(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
iavlStore := UnsafeNewStore(tree)
iter := iavlStore.Iterator([]byte("aloha"), []byte("hellz"))
expected := []string{"aloha", "hello"}
var i int
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
iter = iavlStore.Iterator([]byte("golang"), []byte("rocks"))
expected = []string{"hello"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
iter = iavlStore.Iterator(nil, []byte("golang"))
expected = []string{"aloha"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
iter = iavlStore.Iterator(nil, []byte("shalom"))
expected = []string{"aloha", "hello"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
iter = iavlStore.Iterator(nil, nil)
expected = []string{"aloha", "hello"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
iter = iavlStore.Iterator([]byte("golang"), nil)
expected = []string{"hello"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, treeData[expectedKey])
i++
}
require.Equal(t, len(expected), i)
}
func TestIAVLReverseIterator(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte{0x00}, []byte("0"))
iavlStore.Set([]byte{0x00, 0x00}, []byte("0 0"))
iavlStore.Set([]byte{0x00, 0x01}, []byte("0 1"))
iavlStore.Set([]byte{0x00, 0x02}, []byte("0 2"))
iavlStore.Set([]byte{0x01}, []byte("1"))
testReverseIterator := func(t *testing.T, start, end []byte, expected []string) {
t.Helper()
iter := iavlStore.ReverseIterator(start, end)
var i int
for i = 0; iter.Valid(); iter.Next() {
expectedValue := expected[i]
value := iter.Value()
require.EqualValues(t, string(value), expectedValue)
i++
}
require.Equal(t, len(expected), i)
}
testReverseIterator(t, nil, nil, []string{"1", "0 2", "0 1", "0 0", "0"})
testReverseIterator(t, []byte{0x00}, nil, []string{"1", "0 2", "0 1", "0 0", "0"})
testReverseIterator(t, []byte{0x00}, []byte{0x00, 0x01}, []string{"0 0", "0"})
testReverseIterator(t, []byte{0x00}, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"})
testReverseIterator(t, []byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"})
testReverseIterator(t, nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"})
}
func TestIAVLPrefixIterator(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte("test1"), []byte("test1"))
iavlStore.Set([]byte("test2"), []byte("test2"))
iavlStore.Set([]byte("test3"), []byte("test3"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4"))
var i int
iter := types.KVStorePrefixIterator(iavlStore, []byte("test"))
expected := []string{"test1", "test2", "test3"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, expectedKey)
i++
}
iter.Close()
require.Equal(t, len(expected), i)
iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)})
expected2 := [][]byte{
{byte(55), byte(255), byte(255), byte(0)},
{byte(55), byte(255), byte(255), byte(1)},
{byte(55), byte(255), byte(255), byte(255)},
}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected2[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, []byte("test4"))
i++
}
iter.Close()
require.Equal(t, len(expected2), i)
iter = types.KVStorePrefixIterator(iavlStore, []byte{byte(255), byte(255)})
expected2 = [][]byte{
{byte(255), byte(255), byte(0)},
{byte(255), byte(255), byte(1)},
{byte(255), byte(255), byte(255)},
}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected2[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, []byte("test4"))
i++
}
iter.Close()
require.Equal(t, len(expected2), i)
}
func TestIAVLReversePrefixIterator(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := UnsafeNewStore(tree)
iavlStore.Set([]byte("test1"), []byte("test1"))
iavlStore.Set([]byte("test2"), []byte("test2"))
iavlStore.Set([]byte("test3"), []byte("test3"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(0)}, []byte("test4"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(1)}, []byte("test4"))
iavlStore.Set([]byte{byte(55), byte(255), byte(255), byte(255)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(0)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(1)}, []byte("test4"))
iavlStore.Set([]byte{byte(255), byte(255), byte(255)}, []byte("test4"))
var i int
iter := types.KVStoreReversePrefixIterator(iavlStore, []byte("test"))
expected := []string{"test3", "test2", "test1"}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, expectedKey)
i++
}
require.Equal(t, len(expected), i)
iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(55), byte(255), byte(255)})
expected2 := [][]byte{
{byte(55), byte(255), byte(255), byte(255)},
{byte(55), byte(255), byte(255), byte(1)},
{byte(55), byte(255), byte(255), byte(0)},
}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected2[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, []byte("test4"))
i++
}
require.Equal(t, len(expected2), i)
iter = types.KVStoreReversePrefixIterator(iavlStore, []byte{byte(255), byte(255)})
expected2 = [][]byte{
{byte(255), byte(255), byte(255)},
{byte(255), byte(255), byte(1)},
{byte(255), byte(255), byte(0)},
}
for i = 0; iter.Valid(); iter.Next() {
expectedKey := expected2[i]
key, value := iter.Key(), iter.Value()
require.EqualValues(t, key, expectedKey)
require.EqualValues(t, value, []byte("test4"))
i++
}
require.Equal(t, len(expected2), i)
}
func nextVersion(iavl *Store) {
key := []byte(fmt.Sprintf("Key for tree: %d", iavl.LastCommitID().Version))
value := []byte(fmt.Sprintf("Value for tree: %d", iavl.LastCommitID().Version))
iavl.Set(key, value)
iavl.Commit()
}
func TestIAVLNoPrune(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := UnsafeNewStore(tree)
nextVersion(iavlStore)
for i := 1; i < 100; i++ {
for j := 1; j <= i; j++ {
require.True(t, iavlStore.VersionExists(int64(j)),
"Missing version %d with latest version %d. Should be storing all versions",
j, i)
}
nextVersion(iavlStore)
}
}
func TestIAVLStoreQuery(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := UnsafeNewStore(tree)
k1, v1 := []byte("key1"), []byte("val1")
k2, v2 := []byte("key2"), []byte("val2")
v3 := []byte("val3")
ksub := []byte("key")
KVs0 := kv.Pairs{}
KVs1 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v1},
{Key: k2, Value: v2},
},
}
KVs2 := kv.Pairs{
Pairs: []kv.Pair{
{Key: k1, Value: v3},
{Key: k2, Value: v2},
},
}
valExpSubEmpty, err := KVs0.Marshal()
require.NoError(t, err)
valExpSub1, err := KVs1.Marshal()
require.NoError(t, err)
valExpSub2, err := KVs2.Marshal()
require.NoError(t, err)
cid := iavlStore.Commit()
ver := cid.Version
query := types.RequestQuery{Path: "/key", Data: k1, Height: ver}
querySub := types.RequestQuery{Path: "/subspace", Data: ksub, Height: ver}
// query subspace before anything set
qres, err := iavlStore.Query(&querySub)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, valExpSubEmpty, qres.Value)
// set data
iavlStore.Set(k1, v1)
iavlStore.Set(k2, v2)
// set data without commit, doesn't show up
qres, err = iavlStore.Query(&query)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Nil(t, qres.Value)
// commit it, but still don't see on old version
cid = iavlStore.Commit()
qres, err = iavlStore.Query(&query)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Nil(t, qres.Value)
// but yes on the new version
query.Height = cid.Version
qres, err = iavlStore.Query(&query)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, v1, qres.Value)
// and for the subspace
qres, err = iavlStore.Query(&querySub)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, valExpSub1, qres.Value)
// modify
iavlStore.Set(k1, v3)
cid = iavlStore.Commit()
// query will return old values, as height is fixed
qres, err = iavlStore.Query(&query)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, v1, qres.Value)
// update to latest in the query and we are happy
query.Height = cid.Version
qres, err = iavlStore.Query(&query)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, v3, qres.Value)
query2 := types.RequestQuery{Path: "/key", Data: k2, Height: cid.Version}
qres, err = iavlStore.Query(&query2)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, v2, qres.Value)
// and for the subspace
qres, err = iavlStore.Query(&querySub)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, valExpSub2, qres.Value)
// default (height 0) will show latest -1
query0 := types.RequestQuery{Path: "/key", Data: k1}
qres, err = iavlStore.Query(&query0)
require.NoError(t, err)
require.Equal(t, uint32(0), qres.Code)
require.Equal(t, v1, qres.Value)
}
func BenchmarkIAVLIteratorNext(b *testing.B) {
b.ReportAllocs()
db := wrapper.NewDBWrapper(dbm.NewMemDB())
treeSize := 1000
tree := iavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
for i := 0; i < treeSize; i++ {
key := randBytes(4)
value := randBytes(50)
_, err := tree.Set(key, value)
require.NoError(b, err)
}
iavlStore := UnsafeNewStore(tree)
iterators := make([]types.Iterator, b.N/treeSize)
for i := 0; i < len(iterators); i++ {
iterators[i] = iavlStore.Iterator([]byte{0}, []byte{255, 255, 255, 255, 255})
}
b.ResetTimer()
for i := 0; i < len(iterators); i++ {
iter := iterators[i]
for j := 0; j < treeSize; j++ {
iter.Next()
}
}
}
func TestSetInitialVersion(t *testing.T) {
testCases := []struct {
name string
storeFn func(db *dbm.MemDB) *Store
expPanic bool
}{
{
"works with a mutable tree",
func(db *dbm.MemDB) *Store {
tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger())
store := UnsafeNewStore(tree)
return store
}, false,
},
{
"throws error on immutable tree",
func(db *dbm.MemDB) *Store {
tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger())
store := UnsafeNewStore(tree)
_, version, err := store.tree.SaveVersion()
require.NoError(t, err)
require.Equal(t, int64(1), version)
store, err = store.GetImmutable(1)
require.NoError(t, err)
return store
}, true,
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
db := dbm.NewMemDB()
store := tc.storeFn(db)
if tc.expPanic {
require.Panics(t, func() { store.SetInitialVersion(5) })
} else {
store.SetInitialVersion(5)
cid := store.Commit()
require.Equal(t, int64(5), cid.GetVersion())
}
})
}
}
func TestCacheWraps(t *testing.T) {
db := dbm.NewMemDB()
tree, _ := newAlohaTree(t, db)
store := UnsafeNewStore(tree)
cacheWrapper := store.CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}
func TestChangeSets(t *testing.T) {
db := dbm.NewMemDB()
treeSize := 1000
treeVersion := int64(10)
targetVersion := int64(6)
tree := iavl.NewMutableTree(wrapper.NewDBWrapper(db), cacheSize, false, log.NewNopLogger(), iavl.FlushThresholdOption(math.MaxInt))
for j := int64(0); j < treeVersion; j++ {
keys := [][]byte{}
for i := 0; i < treeSize; i++ {
keys = append(keys, randBytes(4))
}
sort.Slice(keys, func(p, q int) bool {
return bytes.Compare(keys[p], keys[q]) < 0
})
for i := 0; i < treeSize; i++ {
key := keys[i]
value := randBytes(50)
_, err := tree.Set(key, value)
require.NoError(t, err)
}
_, _, err := tree.SaveVersion()
require.NoError(t, err)
}
changeSets := []*iavl.ChangeSet{}
iavlStore := UnsafeNewStore(tree)
commitID := iavlStore.LastCommitID()
require.NoError(t, iavlStore.TraverseStateChanges(targetVersion+1, treeVersion, func(v int64, cs *iavl.ChangeSet) error {
changeSets = append(changeSets, cs)
return nil
}))
require.NoError(t, iavlStore.LoadVersionForOverwriting(targetVersion))
for i, cs := range changeSets {
v, err := tree.SaveChangeSet(cs)
require.NoError(t, err)
require.Equal(t, v, targetVersion+int64(i+1))
}
restoreCommitID := iavlStore.LastCommitID()
require.Equal(t, commitID, restoreCommitID)
}

store/iavl/tree.go Normal file
View File

@ -0,0 +1,98 @@
package iavl
import (
"fmt"
"github.com/cosmos/iavl"
idb "github.com/cosmos/iavl/db"
)
var (
_ Tree = (*immutableTree)(nil)
_ Tree = (*iavl.MutableTree)(nil)
)
type (
// Tree defines an interface that both mutable and immutable IAVL trees
// must implement. For mutable IAVL trees, the interface is directly
// implemented by an iavl.MutableTree. For an immutable IAVL tree, a wrapper
// must be made.
Tree interface {
Has(key []byte) (bool, error)
Get(key []byte) ([]byte, error)
Set(key, value []byte) (bool, error)
Remove(key []byte) ([]byte, bool, error)
SaveVersion() ([]byte, int64, error)
Version() int64
Hash() []byte
WorkingHash() []byte
VersionExists(version int64) bool
DeleteVersionsTo(version int64) error
GetVersioned(key []byte, version int64) ([]byte, error)
GetImmutable(version int64) (*iavl.ImmutableTree, error)
SetInitialVersion(version uint64)
Iterator(start, end []byte, ascending bool) (idb.Iterator, error)
AvailableVersions() []int
LoadVersionForOverwriting(targetVersion int64) error
TraverseStateChanges(startVersion, endVersion int64, fn func(version int64, changeSet *iavl.ChangeSet) error) error
}
// immutableTree is a simple wrapper around a reference to an iavl.ImmutableTree
// that implements the Tree interface. It should only be used for querying
// and iteration, specifically at previous heights.
immutableTree struct {
*iavl.ImmutableTree
}
)
func (it *immutableTree) Set(_, _ []byte) (bool, error) {
panic("cannot call 'Set' on an immutable IAVL tree")
}
func (it *immutableTree) Remove(_ []byte) ([]byte, bool, error) {
panic("cannot call 'Remove' on an immutable IAVL tree")
}
func (it *immutableTree) SaveVersion() ([]byte, int64, error) {
panic("cannot call 'SaveVersion' on an immutable IAVL tree")
}
func (it *immutableTree) DeleteVersionsTo(_ int64) error {
panic("cannot call 'DeleteVersionsTo' on an immutable IAVL tree")
}
func (it *immutableTree) SetInitialVersion(_ uint64) {
panic("cannot call 'SetInitialVersion' on an immutable IAVL tree")
}
func (it *immutableTree) VersionExists(version int64) bool {
return it.Version() == version
}
func (it *immutableTree) GetVersioned(key []byte, version int64) ([]byte, error) {
if it.Version() != version {
return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version())
}
return it.Get(key)
}
func (it *immutableTree) GetImmutable(version int64) (*iavl.ImmutableTree, error) {
if it.Version() != version {
return nil, fmt.Errorf("version mismatch on immutable IAVL tree; got: %d, expected: %d", version, it.Version())
}
return it.ImmutableTree, nil
}
func (it *immutableTree) AvailableVersions() []int {
return []int{}
}
func (it *immutableTree) LoadVersionForOverwriting(targetVersion int64) error {
panic("cannot call 'LoadVersionForOverwriting' on an immutable IAVL tree")
}
func (it *immutableTree) WorkingHash() []byte {
panic("cannot call 'WorkingHash' on an immutable IAVL tree")
}
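// Construction sketch (illustrative): a read-only query at a past height wraps
// the iavl.ImmutableTree so it satisfies the Tree interface. newImmutableTree
// is a hypothetical helper, not part of the original file.
func newImmutableTree(it *iavl.ImmutableTree) Tree {
return &immutableTree{it}
}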

41
store/iavl/tree_test.go Normal file
View File

@ -0,0 +1,41 @@
package iavl
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/wrapper"
)
func TestImmutableTreePanics(t *testing.T) {
t.Parallel()
immTree := iavl.NewImmutableTree(wrapper.NewDBWrapper(dbm.NewMemDB()), 100, false, log.NewNopLogger())
it := &immutableTree{immTree}
require.Panics(t, func() {
_, err := it.Set([]byte{}, []byte{})
require.NoError(t, err)
})
require.Panics(t, func() {
_, _, err := it.Remove([]byte{})
require.NoError(t, err)
})
require.Panics(t, func() { _, _, _ = it.SaveVersion() })
require.Panics(t, func() { _ = it.DeleteVersionsTo(int64(1)) })
val, err := it.GetVersioned(nil, 1)
require.Error(t, err)
require.Nil(t, val)
imm, err := it.GetImmutable(1)
require.Error(t, err)
require.Nil(t, imm)
imm, err = it.GetImmutable(0)
require.NoError(t, err)
require.NotNil(t, imm)
require.Equal(t, immTree, imm)
}

View File

@ -1,2 +1,2 @@
// Package conv provides internal functions for conversions and data manipulation
// Package conv provides internal functions for conversions and data manipulation
package conv

View File

@ -0,0 +1,17 @@
package kv
import "fmt"
// AssertKeyAtLeastLength panics when store key length is less than the given length.
func AssertKeyAtLeastLength(bz []byte, length int) {
if len(bz) < length {
panic(fmt.Sprintf("expected key of length at least %d, got %d", length, len(bz)))
}
}
// AssertKeyLength panics when store key length is not equal to the given length.
func AssertKeyLength(bz []byte, length int) {
if len(bz) != length {
panic(fmt.Sprintf("unexpected key length; got: %d, expected: %d", len(bz), length))
}
}
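// Usage sketch (illustrative): guard a length-prefixed key before slicing it.
// The two-byte prefix layout and splitPrefixedKey are hypothetical examples,
// not a real store key format.
func splitPrefixedKey(bz []byte) (prefix, rest []byte) {
AssertKeyAtLeastLength(bz, 2) // panics instead of slicing out of range
return bz[:2], bz[2:]
}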

28
store/internal/kv/kv.go Normal file
View File

@ -0,0 +1,28 @@
package kv
import (
"bytes"
"sort"
)
func (kvs Pairs) Len() int { return len(kvs.Pairs) }
func (kvs Pairs) Less(i, j int) bool {
switch bytes.Compare(kvs.Pairs[i].Key, kvs.Pairs[j].Key) {
case -1:
return true
case 0:
return bytes.Compare(kvs.Pairs[i].Value, kvs.Pairs[j].Value) < 0
case 1:
return false
default:
panic("invalid comparison result")
}
}
func (kvs Pairs) Swap(i, j int) { kvs.Pairs[i], kvs.Pairs[j] = kvs.Pairs[j], kvs.Pairs[i] }
// Sort invokes sort.Sort on kvs.
func (kvs Pairs) Sort() { sort.Sort(kvs) }
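// Sorting sketch (illustrative): because Pairs implements sort.Interface, a
// raw slice of pairs can be ordered by key (then by value) before Merkle
// hashing. sortedPairs is a hypothetical helper.
func sortedPairs(raw []Pair) Pairs {
kvs := Pairs{Pairs: raw}
kvs.Sort()
return kvs
}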

559
store/internal/kv/kv.pb.go Normal file
View File

@ -0,0 +1,559 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: cosmos/store/internal/kv/v1beta1/kv.proto
package kv
import (
fmt "fmt"
_ "github.com/cosmos/gogoproto/gogoproto"
proto "github.com/cosmos/gogoproto/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// Pairs defines a repeated slice of Pair objects.
type Pairs struct {
Pairs []Pair `protobuf:"bytes,1,rep,name=pairs,proto3" json:"pairs"`
}
func (m *Pairs) Reset() { *m = Pairs{} }
func (m *Pairs) String() string { return proto.CompactTextString(m) }
func (*Pairs) ProtoMessage() {}
func (*Pairs) Descriptor() ([]byte, []int) {
return fileDescriptor_534782c4083e056d, []int{0}
}
func (m *Pairs) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Pairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Pairs.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Pairs) XXX_Merge(src proto.Message) {
xxx_messageInfo_Pairs.Merge(m, src)
}
func (m *Pairs) XXX_Size() int {
return m.Size()
}
func (m *Pairs) XXX_DiscardUnknown() {
xxx_messageInfo_Pairs.DiscardUnknown(m)
}
var xxx_messageInfo_Pairs proto.InternalMessageInfo
func (m *Pairs) GetPairs() []Pair {
if m != nil {
return m.Pairs
}
return nil
}
// Pair defines a key/value bytes tuple.
type Pair struct {
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Pair) Reset() { *m = Pair{} }
func (m *Pair) String() string { return proto.CompactTextString(m) }
func (*Pair) ProtoMessage() {}
func (*Pair) Descriptor() ([]byte, []int) {
return fileDescriptor_534782c4083e056d, []int{1}
}
func (m *Pair) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Pair.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Pair) XXX_Merge(src proto.Message) {
xxx_messageInfo_Pair.Merge(m, src)
}
func (m *Pair) XXX_Size() int {
return m.Size()
}
func (m *Pair) XXX_DiscardUnknown() {
xxx_messageInfo_Pair.DiscardUnknown(m)
}
var xxx_messageInfo_Pair proto.InternalMessageInfo
func (m *Pair) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *Pair) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Pairs)(nil), "cosmos.store.internal.kv.v1beta1.Pairs")
proto.RegisterType((*Pair)(nil), "cosmos.store.internal.kv.v1beta1.Pair")
}
func init() {
proto.RegisterFile("cosmos/store/internal/kv/v1beta1/kv.proto", fileDescriptor_534782c4083e056d)
}
var fileDescriptor_534782c4083e056d = []byte{
// 217 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xce, 0x2f, 0xce,
0xcd, 0x2f, 0xd6, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0xcf, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b,
0xcc, 0xd1, 0xcf, 0x2e, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0xcf, 0x2e, 0xd3,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x80, 0x28, 0xd5, 0x03, 0x2b, 0xd5, 0x83, 0x29, 0xd5,
0xcb, 0x2e, 0xd3, 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd6, 0x07, 0xb1,
0x20, 0xfa, 0x94, 0xbc, 0xb9, 0x58, 0x03, 0x12, 0x33, 0x8b, 0x8a, 0x85, 0x9c, 0xb8, 0x58, 0x0b,
0x40, 0x0c, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x35, 0x3d, 0x42, 0x06, 0xea, 0x81, 0xf4,
0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0xd1, 0xaa, 0xa4, 0xc7, 0xc5, 0x02, 0x12, 0x14,
0x12, 0xe0, 0x62, 0xce, 0x4e, 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85,
0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x93, 0xc5,
0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c,
0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x41, 0x6c, 0x2f, 0x4e, 0xc9,
0xd6, 0xcb, 0xcc, 0xc7, 0xf4, 0x7f, 0x12, 0x1b, 0xd8, 0xf5, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff,
0xff, 0x5d, 0xad, 0x97, 0xdd, 0x22, 0x01, 0x00, 0x00,
}
func (m *Pairs) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Pairs) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Pairs) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Pairs) > 0 {
for iNdEx := len(m.Pairs) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Pairs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintKv(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Pair) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Pair) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Pair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Value) > 0 {
i -= len(m.Value)
copy(dAtA[i:], m.Value)
i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
i--
dAtA[i] = 0x12
}
if len(m.Key) > 0 {
i -= len(m.Key)
copy(dAtA[i:], m.Key)
i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
offset -= sovKv(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Pairs) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Pairs) > 0 {
for _, e := range m.Pairs {
l = e.Size()
n += 1 + l + sovKv(uint64(l))
}
}
return n
}
func (m *Pair) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovKv(uint64(l))
}
return n
}
func sovKv(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozKv(x uint64) (n int) {
return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *Pairs) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Pairs: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Pairs: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Pairs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthKv
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Pairs = append(m.Pairs, Pair{})
if err := m.Pairs[len(m.Pairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipKv(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Pair) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Pair: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Pair: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthKv
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowKv
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthKv
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthKv
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipKv(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthKv
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipKv(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowKv
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthKv
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupKv
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthKv
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group")
)

View File

@ -0,0 +1,13 @@
package maps
import "testing"
func BenchmarkKVPairBytes(b *testing.B) {
kvp := NewKVPair(make([]byte, 128), make([]byte, 1e6))
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
b.SetBytes(int64(len(kvp.Bytes())))
}
}

216
store/internal/maps/maps.go Normal file
View File

@ -0,0 +1,216 @@
package maps
import (
"encoding/binary"
"github.com/cometbft/cometbft/crypto/merkle"
"github.com/cometbft/cometbft/crypto/tmhash"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/internal/tree"
)
// merkleMap defines a merkle-ized tree from a map. Leaf values are treated as
// length-prefixed key | hash(value). Leaves are sorted before Merkle hashing.
type merkleMap struct {
kvs kv.Pairs
sorted bool
}
func newMerkleMap() *merkleMap {
return &merkleMap{
kvs: kv.Pairs{},
sorted: false,
}
}
// set creates a kv.Pair from the provided key and value. The value is hashed prior
// to creating a kv.Pair. The created kv.Pair is appended to the merkleMap's slice
// of kv.Pairs. Each call marks the map as unsorted; it is re-sorted before hashing.
func (sm *merkleMap) set(key string, value []byte) {
byteKey := []byte(key)
assertValidKey(byteKey)
sm.sorted = false
// The value is hashed, so you can check for equality with a cached value (say)
// and make a determination to fetch or not.
vhash := tmhash.Sum(value)
sm.kvs.Pairs = append(sm.kvs.Pairs, kv.Pair{
Key: byteKey,
Value: vhash,
})
}
// hash returns the Merkle root of items sorted by key. Note, the root is unstable if duplicate keys are present.
func (sm *merkleMap) hash() []byte {
sm.sort()
return hashKVPairs(sm.kvs)
}
func (sm *merkleMap) sort() {
if sm.sorted {
return
}
sm.kvs.Sort()
sm.sorted = true
}
// hashKVPairs encodes each kv.Pair to bytes and builds a Merkle tree whose
// leaves are those byte slices.
func hashKVPairs(kvs kv.Pairs) []byte {
kvsH := make([][]byte, len(kvs.Pairs))
for i, kvp := range kvs.Pairs {
kvsH[i] = KVPair(kvp).Bytes()
}
return tree.HashFromByteSlices(kvsH)
}
// ---------------------------------------------
// Merkle tree from a map.
// Leaves are length-prefixed `key | hash(value)`.
// Leaves are sorted before Merkle hashing.
type simpleMap struct {
Kvs kv.Pairs
sorted bool
}
func newSimpleMap() *simpleMap {
return &simpleMap{
Kvs: kv.Pairs{},
sorted: false,
}
}
// Set creates a kv pair of the key and the hash of the value,
// and then appends it to SimpleMap's kv pairs.
func (sm *simpleMap) Set(key string, value []byte) {
byteKey := []byte(key)
assertValidKey(byteKey)
sm.sorted = false
// The value is hashed, so you can
// check for equality with a cached value (say)
// and make a determination to fetch or not.
vhash := tmhash.Sum(value)
sm.Kvs.Pairs = append(sm.Kvs.Pairs, kv.Pair{
Key: byteKey,
Value: vhash,
})
}
// Hash returns the Merkle root hash of items sorted by key
// (UNSTABLE: and by value too if there are duplicate keys).
func (sm *simpleMap) Hash() []byte {
sm.Sort()
return hashKVPairs(sm.Kvs)
}
func (sm *simpleMap) Sort() {
if sm.sorted {
return
}
sm.Kvs.Sort()
sm.sorted = true
}
// KVPairs returns a copy of the sorted kv.Pairs.
// NOTE: the pairs contain the raw key and the hashed value.
func (sm *simpleMap) KVPairs() kv.Pairs {
sm.Sort()
kvs := kv.Pairs{
Pairs: make([]kv.Pair, len(sm.Kvs.Pairs)),
}
copy(kvs.Pairs, sm.Kvs.Pairs)
return kvs
}
//----------------------------------------
// A local extension to KVPair that can be hashed.
// Key and value are length prefixed and concatenated,
// then hashed.
type KVPair kv.Pair
// NewKVPair takes in a key and value and creates a kv.Pair
// wrapped in the local extension KVPair
func NewKVPair(key, value []byte) KVPair {
return KVPair(kv.Pair{
Key: key,
Value: value,
})
}
// Bytes returns key || value, with both the
// key and value length prefixed.
func (kv KVPair) Bytes() []byte {
// In the worst case:
// * 8 bytes to Uvarint encode the length of the key
// * 8 bytes to Uvarint encode the length of the value
// So preallocate for the worst case, which will in total
// be a maximum of 14 bytes wasted, if len(key)=1, len(value)=1,
// but that's going to be rare.
buf := make([]byte, 8+len(kv.Key)+8+len(kv.Value))
// Encode the key, prefixed with its length.
nlk := binary.PutUvarint(buf, uint64(len(kv.Key)))
nk := copy(buf[nlk:], kv.Key)
// Encode the value, prefixing with its length.
nlv := binary.PutUvarint(buf[nlk+nk:], uint64(len(kv.Value)))
nv := copy(buf[nlk+nk+nlv:], kv.Value)
return buf[:nlk+nk+nlv+nv]
}
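// Decoding sketch (illustrative, the inverse of Bytes): read the two uvarint
// length prefixes back out. decodeKVPair is a hypothetical helper and only
// does basic bounds checks.
func decodeKVPair(buf []byte) (key, value []byte, ok bool) {
lk, n := binary.Uvarint(buf)
if n <= 0 || uint64(len(buf)-n) < lk {
return nil, nil, false
}
key = buf[n : n+int(lk)]
rest := buf[n+int(lk):]
lv, m := binary.Uvarint(rest)
if m <= 0 || uint64(len(rest)-m) < lv {
return nil, nil, false
}
value = rest[m : m+int(lv)]
return key, value, true
}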
// HashFromMap computes a merkle tree from sorted map and returns the merkle
// root.
func HashFromMap(m map[string][]byte) []byte {
mm := newMerkleMap()
for k, v := range m {
mm.set(k, v)
}
return mm.hash()
}
// ProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values
// in the underlying key-value pairs.
// The keys are sorted before the proofs are computed.
func ProofsFromMap(m map[string][]byte) ([]byte, map[string]*cmtprotocrypto.Proof, []string) {
sm := newSimpleMap()
for k, v := range m {
sm.Set(k, v)
}
sm.Sort()
kvs := sm.Kvs
kvsBytes := make([][]byte, len(kvs.Pairs))
for i, kvp := range kvs.Pairs {
kvsBytes[i] = KVPair(kvp).Bytes()
}
rootHash, proofList := merkle.ProofsFromByteSlices(kvsBytes)
proofs := make(map[string]*cmtprotocrypto.Proof)
keys := make([]string, len(proofList))
for i, kvp := range kvs.Pairs {
proofs[string(kvp.Key)] = proofList[i].ToProto()
keys[i] = string(kvp.Key)
}
return rootHash, proofs, keys
}
func assertValidKey(key []byte) {
if len(key) == 0 {
panic("key is nil")
}
}
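// Usage sketch (illustrative): compute a root for a map and confirm
// ProofsFromMap yields a proof for every key. hashAndProve is a hypothetical
// helper.
func hashAndProve(m map[string][]byte) ([]byte, bool) {
root := HashFromMap(m)
_, proofs, keys := ProofsFromMap(m)
for _, k := range keys {
if proofs[k] == nil {
return root, false
}
}
return root, true
}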

View File

@ -0,0 +1,104 @@
package maps
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEmptyKeyMerkleMap(t *testing.T) {
db := newMerkleMap()
require.Panics(t, func() { db.set("", []byte("value")) }, "setting an empty key should panic")
}
func TestMerkleMap(t *testing.T) {
tests := []struct {
keys []string
values []string // each string gets converted to []byte in test
want string
}{
{[]string{}, []string{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
// swap order with 2 keys
{
[]string{"key1", "key2"},
[]string{"value1", "value2"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
{
[]string{"key2", "key1"},
[]string{"value2", "value1"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
// swap order with 3 keys
{
[]string{"key1", "key2", "key3"},
[]string{"value1", "value2", "value3"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
{
[]string{"key1", "key3", "key2"},
[]string{"value1", "value3", "value2"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
}
for i, tc := range tests {
db := newMerkleMap()
for i := 0; i < len(tc.keys); i++ {
db.set(tc.keys[i], []byte(tc.values[i]))
}
got := db.hash()
assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i)
}
}
func TestEmptyKeySimpleMap(t *testing.T) {
db := newSimpleMap()
require.Panics(t, func() { db.Set("", []byte("value")) }, "setting an empty key should panic")
}
func TestSimpleMap(t *testing.T) {
tests := []struct {
keys []string
values []string // each string gets converted to []byte in test
want string
}{
{[]string{}, []string{}, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
// swap order with 2 keys
{
[]string{"key1", "key2"},
[]string{"value1", "value2"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
{
[]string{"key2", "key1"},
[]string{"value2", "value1"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
// swap order with 3 keys
{
[]string{"key1", "key2", "key3"},
[]string{"value1", "value2", "value3"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
{
[]string{"key1", "key3", "key2"},
[]string{"value1", "value3", "value2"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
}
for i, tc := range tests {
db := newSimpleMap()
for i := 0; i < len(tc.keys); i++ {
db.Set(tc.keys[i], []byte(tc.values[i]))
}
got := db.Hash()
assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i)
}
}

View File

@ -0,0 +1,98 @@
package proofs
import (
"fmt"
"math/bits"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
ics23 "github.com/cosmos/ics23/go"
)
// ConvertExistenceProof will convert the given proof into a valid
// existence proof, if that's what it is.
//
// This is the simplest case of the range proof and we will focus on
// demoing compatibility here
func ConvertExistenceProof(p *cmtprotocrypto.Proof, key, value []byte) (*ics23.ExistenceProof, error) {
path, err := convertInnerOps(p)
if err != nil {
return nil, err
}
proof := &ics23.ExistenceProof{
Key: key,
Value: value,
Leaf: convertLeafOp(),
Path: path,
}
return proof, nil
}
// this is adapted from merkle/hash.go:leafHash()
// and merkle/simple_map.go:KVPair.Bytes()
func convertLeafOp() *ics23.LeafOp {
prefix := []byte{0}
return &ics23.LeafOp{
Hash: ics23.HashOp_SHA256,
PrehashKey: ics23.HashOp_NO_HASH,
PrehashValue: ics23.HashOp_SHA256,
Length: ics23.LengthOp_VAR_PROTO,
Prefix: prefix,
}
}
func convertInnerOps(p *cmtprotocrypto.Proof) ([]*ics23.InnerOp, error) {
inners := make([]*ics23.InnerOp, 0, len(p.Aunts))
path := buildPath(p.Index, p.Total)
if len(p.Aunts) != len(path) {
return nil, fmt.Errorf("calculated a path different length (%d) than provided by SimpleProof (%d)", len(path), len(p.Aunts))
}
for i, aunt := range p.Aunts {
auntRight := path[i]
// combine with: 0x01 || lefthash || righthash
inner := &ics23.InnerOp{Hash: ics23.HashOp_SHA256}
if auntRight {
inner.Prefix = []byte{1}
inner.Suffix = aunt
} else {
inner.Prefix = append([]byte{1}, aunt...)
}
inners = append(inners, inner)
}
return inners, nil
}
// buildPath returns the list of steps from leaf to root.
// At each step, true means the index is on the left side, false means it is on the right side.
// Code adapted from merkle/simple_proof.go:computeHashFromAunts.
func buildPath(idx, total int64) []bool {
if total < 2 {
return nil
}
numLeft := getSplitPoint(total)
goLeft := idx < numLeft
// we put goLeft at the end of the array, as we recurse from top to bottom,
// and want the leaf to be first in array, root last
if goLeft {
return append(buildPath(idx, numLeft), goLeft)
}
return append(buildPath(idx-numLeft, total-numLeft), goLeft)
}
func getSplitPoint(length int64) int64 {
if length < 1 {
panic("Trying to split a tree with size < 1")
}
uLength := uint(length)
bitlen := bits.Len(uLength)
k := int64(1 << uint(bitlen-1))
if k == length {
k >>= 1
}
return k
}
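// Worked example (illustrative, mirroring the "size of 6 right-left" test
// case below): with 6 leaves the split point is 4, so index 4 goes right at
// the root step (false) and left within its pair (true); leaf-to-root the
// path is []bool{true, false}. examplePath is a hypothetical helper.
func examplePath() []bool {
return buildPath(4, 6) // []bool{true, false}
}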

View File

@ -0,0 +1,105 @@
package proofs
import (
"bytes"
"fmt"
"testing"
)
func TestLeafOp(t *testing.T) {
proof := GenerateRangeProof(20, Middle)
converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value)
if err != nil {
t.Fatal(err)
}
leaf := converted.GetLeaf()
if leaf == nil {
t.Fatalf("Missing leaf node")
}
hash, err := leaf.Apply(converted.Key, converted.Value)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(hash, proof.Proof.LeafHash) {
t.Errorf("Calculated: %X\nExpected: %X", hash, proof.Proof.LeafHash)
}
}
func TestBuildPath(t *testing.T) {
cases := map[string]struct {
idx int64
total int64
expected []bool
}{
"pair left": {
idx: 0,
total: 2,
expected: []bool{true},
},
"pair right": {
idx: 1,
total: 2,
expected: []bool{false},
},
"power of 2": {
idx: 3,
total: 8,
expected: []bool{false, false, true},
},
"size of 7 right most": {
idx: 6,
total: 7,
expected: []bool{false, false},
},
"size of 6 right-left (from top)": {
idx: 4,
total: 6,
expected: []bool{true, false},
},
"size of 6 left-right-left (from top)": {
idx: 2,
total: 7,
expected: []bool{true, false, true},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
path := buildPath(tc.idx, tc.total)
if len(path) != len(tc.expected) {
t.Fatalf("Got %v\nExpected %v", path, tc.expected)
}
for i := range path {
if path[i] != tc.expected[i] {
t.Fatalf("Differ at %d\nGot %v\nExpected %v", i, path, tc.expected)
}
}
})
}
}
func TestConvertProof(t *testing.T) {
for i := 0; i < 100; i++ {
t.Run(fmt.Sprintf("Run %d", i), func(t *testing.T) {
proof := GenerateRangeProof(57, Left)
converted, err := ConvertExistenceProof(proof.Proof, proof.Key, proof.Value)
if err != nil {
t.Fatal(err)
}
calc, err := converted.Calculate()
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(calc, proof.RootHash) {
t.Errorf("Calculated: %X\nExpected: %X", calc, proof.RootHash)
}
})
}
}

View File

@ -0,0 +1,103 @@
package proofs
import (
"errors"
"sort"
ics23 "github.com/cosmos/ics23/go"
sdkmaps "cosmossdk.io/store/internal/maps"
)
var (
ErrEmptyKey = errors.New("key is empty")
ErrEmptyKeyInData = errors.New("data contains empty key")
)
/*
CreateMembershipProof will produce a CommitmentProof that the given key (and its queried value) exists in the map.
If the key doesn't exist in the tree, this will return an error.
*/
func CreateMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) {
if len(key) == 0 {
return nil, ErrEmptyKey
}
exist, err := createExistenceProof(data, key)
if err != nil {
return nil, err
}
proof := &ics23.CommitmentProof{
Proof: &ics23.CommitmentProof_Exist{
Exist: exist,
},
}
return proof, nil
}
/*
CreateNonMembershipProof will produce a CommitmentProof that the given key doesn't exist in the map.
If the key exists in the tree, this will return an error.
*/
func CreateNonMembershipProof(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error) {
if len(key) == 0 {
return nil, ErrEmptyKey
}
// ensure this key is not in the store
if _, ok := data[string(key)]; ok {
return nil, errors.New("cannot create non-membership proof if key is in map")
}
keys := SortedKeys(data)
rightidx := sort.SearchStrings(keys, string(key))
var err error
nonexist := &ics23.NonExistenceProof{
Key: key,
}
// include left proof unless key is left of entire map
if rightidx >= 1 {
leftkey := keys[rightidx-1]
nonexist.Left, err = createExistenceProof(data, []byte(leftkey))
if err != nil {
return nil, err
}
}
// include right proof unless key is right of entire map
if rightidx < len(keys) {
rightkey := keys[rightidx]
nonexist.Right, err = createExistenceProof(data, []byte(rightkey))
if err != nil {
return nil, err
}
}
proof := &ics23.CommitmentProof{
Proof: &ics23.CommitmentProof_Nonexist{
Nonexist: nonexist,
},
}
return proof, nil
}
func createExistenceProof(data map[string][]byte, key []byte) (*ics23.ExistenceProof, error) {
for k := range data {
if k == "" {
return nil, ErrEmptyKeyInData
}
}
value, ok := data[string(key)]
if !ok {
return nil, errors.New("cannot make existence proof if key is not in map")
}
_, proofs, _ := sdkmaps.ProofsFromMap(data)
proof := proofs[string(key)]
if proof == nil {
return nil, errors.New("returned no proof for key")
}
return ConvertExistenceProof(proof, key, value)
}
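// Usage sketch (illustrative): proving absence of a key that falls between two
// existing keys; the left and right neighbors are included in the proof. The
// map literal and exampleNonMembership are hypothetical.
func exampleNonMembership() (*ics23.CommitmentProof, error) {
data := map[string][]byte{"a": []byte("1"), "c": []byte("3")}
return CreateNonMembershipProof(data, []byte("b")) // Left proves "a", Right proves "c"
}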

View File

@ -0,0 +1,125 @@
package proofs
import (
"errors"
"testing"
ics23 "github.com/cosmos/ics23/go"
"github.com/stretchr/testify/assert"
)
func TestCreateMembership(t *testing.T) {
cases := map[string]struct {
size int
loc Where
}{
"small left": {size: 100, loc: Left},
"small middle": {size: 100, loc: Middle},
"small right": {size: 100, loc: Right},
"big left": {size: 5431, loc: Left},
"big middle": {size: 5431, loc: Middle},
"big right": {size: 5431, loc: Right},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
data := BuildMap(tc.size)
allkeys := SortedKeys(data)
key := GetKey(allkeys, tc.loc)
nonKey := GetNonKey(allkeys, tc.loc)
// error if the key does not exist
proof, err := CreateMembershipProof(data, []byte(nonKey))
assert.EqualError(t, err, "cannot make existence proof if key is not in map")
assert.Nil(t, proof)
val := data[key]
proof, err = CreateMembershipProof(data, []byte(key))
if err != nil {
t.Fatalf("Creating Proof: %+v", err)
}
if proof.GetExist() == nil {
t.Fatal("Unexpected proof format")
}
root := CalcRoot(data)
err = proof.GetExist().Verify(ics23.TendermintSpec, root, []byte(key), val)
if err != nil {
t.Fatalf("Verifying Proof: %+v", err)
}
valid := ics23.VerifyMembership(ics23.TendermintSpec, root, proof, []byte(key), val)
if !valid {
t.Fatalf("Membership Proof Invalid")
}
})
}
}
func TestCreateNonMembership(t *testing.T) {
cases := map[string]struct {
size int
loc Where
}{
"small left": {size: 100, loc: Left},
"small middle": {size: 100, loc: Middle},
"small right": {size: 100, loc: Right},
"big left": {size: 5431, loc: Left},
"big middle": {size: 5431, loc: Middle},
"big right": {size: 5431, loc: Right},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
data := BuildMap(tc.size)
allkeys := SortedKeys(data)
nonKey := GetNonKey(allkeys, tc.loc)
key := GetKey(allkeys, tc.loc)
// error if the key exists
proof, err := CreateNonMembershipProof(data, []byte(key))
assert.EqualError(t, err, "cannot create non-membership proof if key is in map")
assert.Nil(t, proof)
proof, err = CreateNonMembershipProof(data, []byte(nonKey))
if err != nil {
t.Fatalf("Creating Proof: %+v", err)
}
if proof.GetNonexist() == nil {
t.Fatal("Unexpected proof format")
}
root := CalcRoot(data)
err = proof.GetNonexist().Verify(ics23.TendermintSpec, root, []byte(nonKey))
if err != nil {
t.Fatalf("Verifying Proof: %+v", err)
}
valid := ics23.VerifyNonMembership(ics23.TendermintSpec, root, proof, []byte(nonKey))
if !valid {
t.Fatalf("Non Membership Proof Invalid")
}
})
}
}
func TestInvalidKey(t *testing.T) {
tests := []struct {
name string
f func(data map[string][]byte, key []byte) (*ics23.CommitmentProof, error)
data map[string][]byte
key []byte
err error
}{
{"CreateMembershipProof empty key", CreateMembershipProof, map[string][]byte{"": nil}, []byte(""), ErrEmptyKey},
{"CreateMembershipProof empty key in data", CreateMembershipProof, map[string][]byte{"": nil, " ": nil}, []byte(" "), ErrEmptyKeyInData},
{"CreateNonMembershipProof empty key", CreateNonMembershipProof, map[string][]byte{" ": nil}, []byte(""), ErrEmptyKey},
{"CreateNonMembershipProof empty key in data", CreateNonMembershipProof, map[string][]byte{"": nil}, []byte(" "), ErrEmptyKeyInData},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
_, err := tc.f(tc.data, tc.key)
assert.True(t, errors.Is(err, tc.err))
})
}
}

View File

@ -0,0 +1,101 @@
package proofs
import (
"sort"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
"golang.org/x/exp/maps"
"cosmossdk.io/math/unsafe"
sdkmaps "cosmossdk.io/store/internal/maps"
)
// SimpleResult contains a merkle.SimpleProof along with all data needed to build the confio/proof
type SimpleResult struct {
Key []byte
Value []byte
Proof *cmtprotocrypto.Proof
RootHash []byte
}
// GenerateRangeProof makes a tree of the given size and returns a range proof
// for one element selected by loc, along with the root hash of the tree.
func GenerateRangeProof(size int, loc Where) *SimpleResult {
data := BuildMap(size)
root, proofs, allkeys := sdkmaps.ProofsFromMap(data)
key := GetKey(allkeys, loc)
proof := proofs[key]
res := &SimpleResult{
Key: []byte(key),
Value: toValue(key),
Proof: proof,
RootHash: root,
}
return res
}
// Where selects a location for a key - Left, Right, or Middle
type Where int
const (
Left Where = iota
Right
Middle
)
func SortedKeys(data map[string][]byte) []string {
keys := maps.Keys(data)
sort.Strings(keys)
return keys
}
func CalcRoot(data map[string][]byte) []byte {
root, _, _ := sdkmaps.ProofsFromMap(data)
return root
}
// GetKey returns a key from allkeys at the Left, Right, or Middle.
func GetKey(allkeys []string, loc Where) string {
if loc == Left {
return allkeys[0]
}
if loc == Right {
return allkeys[len(allkeys)-1]
}
// select a random index between 1 and len(allkeys)-2
idx := unsafe.NewRand().Int()%(len(allkeys)-2) + 1
return allkeys[idx]
}
// GetNonKey returns a missing key - Left of all, Right of all, or in the Middle
func GetNonKey(allkeys []string, loc Where) string {
if loc == Left {
return string([]byte{1, 1, 1, 1})
}
if loc == Right {
return string([]byte{0xff, 0xff, 0xff, 0xff})
}
// otherwise, next to an existing key (copy before mod)
key := GetKey(allkeys, loc)
key = key[:len(key)-2] + string([]byte{255, 255})
return key
}
func toValue(key string) []byte {
return []byte("value_for_" + key)
}
// BuildMap creates size random key/value pairs
// and stores them in a map.
func BuildMap(size int) map[string][]byte {
data := make(map[string][]byte)
// insert lots of info and store the bytes
for i := 0; i < size; i++ {
key := unsafe.Str(20)
data[key] = toValue(key)
}
return data
}

View File

@ -0,0 +1,68 @@
package tree
import (
"crypto/sha256"
"hash"
"math/bits"
)
var (
leafPrefix = []byte{0}
innerPrefix = []byte{1}
)
// HashFromByteSlices computes a Merkle tree where the leaves are the byte slices,
// in the provided order. It follows RFC-6962.
func HashFromByteSlices(items [][]byte) []byte {
return hashFromByteSlices(sha256.New(), items)
}
func hashFromByteSlices(sha hash.Hash, items [][]byte) []byte {
switch len(items) {
case 0:
return emptyHash()
case 1:
return leafHashOpt(sha, items[0])
default:
k := getSplitPoint(int64(len(items)))
left := hashFromByteSlices(sha, items[:k])
right := hashFromByteSlices(sha, items[k:])
return innerHashOpt(sha, left, right)
}
}
// returns sha256(0x00 || leaf)
func leafHashOpt(s hash.Hash, leaf []byte) []byte {
s.Reset()
s.Write(leafPrefix)
s.Write(leaf)
return s.Sum(nil)
}
func innerHashOpt(s hash.Hash, left, right []byte) []byte {
s.Reset()
s.Write(innerPrefix)
s.Write(left)
s.Write(right)
return s.Sum(nil)
}
// returns tmhash(<empty>)
func emptyHash() []byte {
h := sha256.Sum256([]byte{})
return h[:]
}
// getSplitPoint returns the largest power of 2 less than length
func getSplitPoint(length int64) int64 {
if length < 1 {
panic("Trying to split a tree with size < 1")
}
uLength := uint(length)
bitlen := bits.Len(uLength)
k := int64(1 << uint(bitlen-1))
if k == length {
k >>= 1
}
return k
}
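// Worked example (illustrative): for three leaves the split point is 2, so the
// root is innerHash(innerHash(leaf(a), leaf(b)), leaf(c)). exampleRoot is a
// hypothetical helper.
func exampleRoot() []byte {
return HashFromByteSlices([][]byte{[]byte("a"), []byte("b"), []byte("c")})
}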

142
store/listenkv/store.go Normal file
View File

@ -0,0 +1,142 @@
package listenkv
import (
"io"
"cosmossdk.io/store/types"
)
var _ types.KVStore = &Store{}
// Store implements the KVStore interface with listening enabled.
// State changes (writes and deletes) on each core KVStore call are recorded
// to the underlying MemoryListener along with the parent store's key.
type Store struct {
parent types.KVStore
listener *types.MemoryListener
parentStoreKey types.StoreKey
}
// NewStore returns a reference to a new listenkv.Store given a parent
// KVStore implementation, its store key, and a MemoryListener.
func NewStore(parent types.KVStore, parentStoreKey types.StoreKey, listener *types.MemoryListener) *Store {
return &Store{parent: parent, listener: listener, parentStoreKey: parentStoreKey}
}
// Get implements the KVStore interface. It delegates the Get call to the
// parent KVStore; reads are not recorded by the listener.
func (s *Store) Get(key []byte) []byte {
value := s.parent.Get(key)
return value
}
// Set implements the KVStore interface. It records the write with the
// listener and delegates the Set call to the parent KVStore.
func (s *Store) Set(key, value []byte) {
types.AssertValidKey(key)
s.parent.Set(key, value)
s.listener.OnWrite(s.parentStoreKey, key, value, false)
}
// Delete implements the KVStore interface. It records the delete with the
// listener and delegates the Delete call to the parent KVStore.
func (s *Store) Delete(key []byte) {
s.parent.Delete(key)
s.listener.OnWrite(s.parentStoreKey, key, nil, true)
}
// Has implements the KVStore interface. It delegates the Has call to the
// parent KVStore.
func (s *Store) Has(key []byte) bool {
return s.parent.Has(key)
}
// Iterator implements the KVStore interface. It delegates the Iterator call
// to the parent KVStore.
func (s *Store) Iterator(start, end []byte) types.Iterator {
return s.iterator(start, end, true)
}
// ReverseIterator implements the KVStore interface. It delegates the
// ReverseIterator call to the parent KVStore.
func (s *Store) ReverseIterator(start, end []byte) types.Iterator {
return s.iterator(start, end, false)
}
// iterator facilitates iteration over a KVStore. It delegates the necessary
// calls to its parent KVStore.
func (s *Store) iterator(start, end []byte, ascending bool) types.Iterator {
var parent types.Iterator
if ascending {
parent = s.parent.Iterator(start, end)
} else {
parent = s.parent.ReverseIterator(start, end)
}
return newTraceIterator(parent, s.listener)
}
type listenIterator struct {
parent types.Iterator
listener *types.MemoryListener
}
func newTraceIterator(parent types.Iterator, listener *types.MemoryListener) types.Iterator {
return &listenIterator{parent: parent, listener: listener}
}
// Domain implements the Iterator interface.
func (li *listenIterator) Domain() (start, end []byte) {
return li.parent.Domain()
}
// Valid implements the Iterator interface.
func (li *listenIterator) Valid() bool {
return li.parent.Valid()
}
// Next implements the Iterator interface.
func (li *listenIterator) Next() {
li.parent.Next()
}
// Key implements the Iterator interface.
func (li *listenIterator) Key() []byte {
key := li.parent.Key()
return key
}
// Value implements the Iterator interface.
func (li *listenIterator) Value() []byte {
value := li.parent.Value()
return value
}
// Close implements the Iterator interface.
func (li *listenIterator) Close() error {
return li.parent.Close()
}
// Error delegates the Error call to the parent iterator.
func (li *listenIterator) Error() error {
return li.parent.Error()
}
// GetStoreType implements the KVStore interface. It returns the underlying
// KVStore type.
func (s *Store) GetStoreType() types.StoreType {
return s.parent.GetStoreType()
}
// CacheWrap implements the KVStore interface. It panics as a Store
// cannot be cache wrapped.
func (s *Store) CacheWrap() types.CacheWrap {
panic("cannot CacheWrap a ListenKVStore")
}
// CacheWrapWithTrace implements the KVStore interface. It panics as a
// Store cannot be cache wrapped.
func (s *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
panic("cannot CacheWrapWithTrace a ListenKVStore")
}
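// Wiring sketch (illustrative): attach a MemoryListener to an existing KVStore
// and drain the recorded writes. exampleListen and the store key name are
// hypothetical.
func exampleListen(parent types.KVStore) []*types.StoreKVPair {
listener := types.NewMemoryListener()
storeKey := types.NewKVStoreKey("example")
s := NewStore(parent, storeKey, listener)
s.Set([]byte("k"), []byte("v")) // recorded as a StoreKVPair with Delete=false
return listener.PopStateCache()
}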

View File

@ -0,0 +1,281 @@
package listenkv_test
import (
"fmt"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/listenkv"
"cosmossdk.io/store/prefix"
"cosmossdk.io/store/types"
)
func bz(s string) []byte { return []byte(s) }
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
var kvPairs = []kv.Pair{
{Key: keyFmt(1), Value: valFmt(1)},
{Key: keyFmt(2), Value: valFmt(2)},
{Key: keyFmt(3), Value: valFmt(3)},
}
var testStoreKey = types.NewKVStoreKey("listen_test")
func newListenKVStore(listener *types.MemoryListener) *listenkv.Store {
store := newEmptyListenKVStore(listener)
for _, kvPair := range kvPairs {
store.Set(kvPair.Key, kvPair.Value)
}
return store
}
func newEmptyListenKVStore(listener *types.MemoryListener) *listenkv.Store {
memDB := dbadapter.Store{DB: dbm.NewMemDB()}
return listenkv.NewStore(memDB, testStoreKey, listener)
}
func TestListenKVStoreGet(t *testing.T) {
testCases := []struct {
key []byte
expectedValue []byte
}{
{
key: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
},
{
key: []byte("does-not-exist"),
expectedValue: nil,
},
}
for _, tc := range testCases {
listener := types.NewMemoryListener()
store := newListenKVStore(listener)
value := store.Get(tc.key)
require.Equal(t, tc.expectedValue, value)
}
}
func TestListenKVStoreSet(t *testing.T) {
testCases := []struct {
key []byte
value []byte
expectedOut *types.StoreKVPair
}{
{
key: kvPairs[0].Key,
value: kvPairs[0].Value,
expectedOut: &types.StoreKVPair{
Key: kvPairs[0].Key,
Value: kvPairs[0].Value,
StoreKey: testStoreKey.Name(),
Delete: false,
},
},
{
key: kvPairs[1].Key,
value: kvPairs[1].Value,
expectedOut: &types.StoreKVPair{
Key: kvPairs[1].Key,
Value: kvPairs[1].Value,
StoreKey: testStoreKey.Name(),
Delete: false,
},
},
{
key: kvPairs[2].Key,
value: kvPairs[2].Value,
expectedOut: &types.StoreKVPair{
Key: kvPairs[2].Key,
Value: kvPairs[2].Value,
StoreKey: testStoreKey.Name(),
Delete: false,
},
},
}
for _, tc := range testCases {
listener := types.NewMemoryListener()
store := newEmptyListenKVStore(listener)
store.Set(tc.key, tc.value)
storeKVPair := listener.PopStateCache()[0]
require.Equal(t, tc.expectedOut, storeKVPair)
}
listener := types.NewMemoryListener()
store := newEmptyListenKVStore(listener)
require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
}
func TestListenKVStoreDelete(t *testing.T) {
testCases := []struct {
key []byte
expectedOut *types.StoreKVPair
}{
{
key: kvPairs[0].Key,
expectedOut: &types.StoreKVPair{
Key: kvPairs[0].Key,
Value: nil,
StoreKey: testStoreKey.Name(),
Delete: true,
},
},
}
for _, tc := range testCases {
listener := types.NewMemoryListener()
store := newListenKVStore(listener)
store.Delete(tc.key)
cache := listener.PopStateCache()
require.NotEmpty(t, cache)
storeKVPair := cache[len(cache)-1]
require.Equal(t, tc.expectedOut, storeKVPair)
}
}
func TestListenKVStoreHas(t *testing.T) {
testCases := []struct {
key []byte
expected bool
}{
{
key: kvPairs[0].Key,
expected: true,
},
}
for _, tc := range testCases {
listener := types.NewMemoryListener()
store := newListenKVStore(listener)
ok := store.Has(tc.key)
require.Equal(t, tc.expected, ok)
}
}
func TestListenKVStoreIterator(t *testing.T) {
listener := types.NewMemoryListener()
store := newListenKVStore(listener)
iterator := store.Iterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []byte(nil), s)
require.Equal(t, []byte(nil), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
}{
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
},
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
},
}
for _, tc := range testCases {
ka := iterator.Key()
require.Equal(t, tc.expectedKey, ka)
va := iterator.Value()
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NoError(t, iterator.Close())
}
func TestListenKVStoreReverseIterator(t *testing.T) {
listener := types.NewMemoryListener()
store := newListenKVStore(listener)
iterator := store.ReverseIterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []byte(nil), s)
require.Equal(t, []byte(nil), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
}{
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
},
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
},
}
for _, tc := range testCases {
ka := iterator.Key()
require.Equal(t, tc.expectedKey, ka)
va := iterator.Value()
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NoError(t, iterator.Close())
}
func TestListenKVStorePrefix(t *testing.T) {
store := newEmptyListenKVStore(nil)
pStore := prefix.NewStore(store, []byte("listen_prefix"))
require.IsType(t, prefix.Store{}, pStore)
}
func TestListenKVStoreGetStoreType(t *testing.T) {
memDB := dbadapter.Store{DB: dbm.NewMemDB()}
store := newEmptyListenKVStore(nil)
require.Equal(t, memDB.GetStoreType(), store.GetStoreType())
}
func TestListenKVStoreCacheWrap(t *testing.T) {
store := newEmptyListenKVStore(nil)
require.Panics(t, func() { store.CacheWrap() })
}
func TestListenKVStoreCacheWrapWithTrace(t *testing.T) {
store := newEmptyListenKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}

53
store/mem/mem_test.go Normal file
View File

@ -0,0 +1,53 @@
package mem_test
import (
"testing"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/mem"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
func TestStore(t *testing.T) {
db := mem.NewStore()
require.Equal(t, types.StoreTypeMemory, db.GetStoreType())
key, value := []byte("key"), []byte("value")
require.Nil(t, db.Get(key))
db.Set(key, value)
require.Equal(t, value, db.Get(key))
newValue := []byte("newValue")
db.Set(key, newValue)
require.Equal(t, newValue, db.Get(key))
db.Delete(key)
require.Nil(t, db.Get(key))
cacheWrapper := db.CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
cacheWrappedWithTrace := db.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}
func TestCommit(t *testing.T) {
db := mem.NewStore()
key, value := []byte("key"), []byte("value")
db.Set(key, value)
id := db.Commit()
require.True(t, id.IsZero())
require.True(t, db.LastCommitID().IsZero())
require.Equal(t, value, db.Get(key))
}
func TestStorePruningOptions(t *testing.T) {
// this is a no-op
db := mem.NewStore()
require.Equal(t, pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined), db.GetPruning())
}

62
store/mem/store.go Normal file
View File

@ -0,0 +1,62 @@
package mem
import (
"io"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
var (
_ types.KVStore = (*Store)(nil)
_ types.Committer = (*Store)(nil)
)
// Store implements an in-memory only KVStore. Entries are persisted between
// commits and thus between blocks. State in the memory store is not committed
// as part of app state; it is maintained privately by each node.
type Store struct {
dbadapter.Store
}
func NewStore() *Store {
return NewStoreWithDB(dbm.NewMemDB())
}
func NewStoreWithDB(db *dbm.MemDB) *Store { //nolint: interfacer // Concrete return type is fine here.
return &Store{Store: dbadapter.Store{DB: db}}
}
// GetStoreType returns the Store's type.
func (s Store) GetStoreType() types.StoreType {
return types.StoreTypeMemory
}
// CacheWrap branches the underlying store.
func (s Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(s)
}
// CacheWrapWithTrace implements KVStore.
func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
// Commit performs a no-op as entries are persistent between commitments.
func (s *Store) Commit() (id types.CommitID) { return }
// SetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (s *Store) SetPruning(pruning pruningtypes.PruningOptions) {}
// GetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (s *Store) GetPruning() pruningtypes.PruningOptions {
return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
}
func (s Store) LastCommitID() (id types.CommitID) { return }
func (s Store) WorkingHash() (hash []byte) { return }

View File

@ -0,0 +1,56 @@
package metrics
import (
"time"
"github.com/hashicorp/go-metrics"
)
// StoreMetrics defines the set of metrics for the store package
type StoreMetrics interface {
MeasureSince(keys ...string)
}
var (
_ StoreMetrics = Metrics{}
_ StoreMetrics = NoOpMetrics{}
)
// Metrics defines the metrics wrapper for the store package
type Metrics struct {
Labels []metrics.Label
}
// NewMetrics returns a new instance of the Metrics with labels set by the node operator
func NewMetrics(labels [][]string) Metrics {
gatherer := Metrics{}
if numGlobalLabels := len(labels); numGlobalLabels > 0 {
parsedGlobalLabels := make([]metrics.Label, numGlobalLabels)
for i, gl := range labels {
parsedGlobalLabels[i] = metrics.Label{Name: gl[0], Value: gl[1]}
}
gatherer.Labels = parsedGlobalLabels
}
return gatherer
}
// MeasureSince provides wrapper functionality for emitting a time measure
// metric with global labels (if any).
func (m Metrics) MeasureSince(keys ...string) {
start := time.Now()
metrics.MeasureSinceWithLabels(keys, start.UTC(), m.Labels)
}
// NoOpMetrics is a no-op implementation of the StoreMetrics interface
type NoOpMetrics struct{}
// NewNoOpMetrics returns a new instance of the NoOpMetrics
func NewNoOpMetrics() NoOpMetrics {
return NoOpMetrics{}
}
// MeasureSince is a no-op implementation of the StoreMetrics interface to avoid time.Now() calls
func (m NoOpMetrics) MeasureSince(keys ...string) {}
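// Usage sketch (illustrative): build a Metrics gatherer from operator-supplied
// global label pairs and emit a timing measurement. emitExample and the label
// values are hypothetical.
func emitExample() {
m := NewMetrics([][]string{{"chain_id", "test-chain"}})
m.MeasureSince("store", "commit")
}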

View File

@ -0,0 +1,221 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/cosmos/cosmos-db (interfaces: DB)
// Package mock is a generated GoMock package.
package mock
import (
reflect "reflect"
db "github.com/cosmos/cosmos-db"
gomock "github.com/golang/mock/gomock"
)
// MockDB is a mock of DB interface.
type MockDB struct {
ctrl *gomock.Controller
recorder *MockDBMockRecorder
}
// MockDBMockRecorder is the mock recorder for MockDB.
type MockDBMockRecorder struct {
mock *MockDB
}
// NewMockDB creates a new mock instance.
func NewMockDB(ctrl *gomock.Controller) *MockDB {
mock := &MockDB{ctrl: ctrl}
mock.recorder = &MockDBMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDB) EXPECT() *MockDBMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockDB) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
return ret0
}
// Close indicates an expected call of Close.
func (mr *MockDBMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDB)(nil).Close))
}
// Delete mocks base method.
func (m *MockDB) Delete(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Delete", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// Delete indicates an expected call of Delete.
func (mr *MockDBMockRecorder) Delete(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockDB)(nil).Delete), arg0)
}
// DeleteSync mocks base method.
func (m *MockDB) DeleteSync(arg0 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteSync", arg0)
ret0, _ := ret[0].(error)
return ret0
}
// DeleteSync indicates an expected call of DeleteSync.
func (mr *MockDBMockRecorder) DeleteSync(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSync", reflect.TypeOf((*MockDB)(nil).DeleteSync), arg0)
}
// Get mocks base method.
func (m *MockDB) Get(arg0 []byte) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Get", arg0)
ret0, _ := ret[0].([]byte)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Get indicates an expected call of Get.
func (mr *MockDBMockRecorder) Get(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockDB)(nil).Get), arg0)
}
// Has mocks base method.
func (m *MockDB) Has(arg0 []byte) (bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Has", arg0)
ret0, _ := ret[0].(bool)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Has indicates an expected call of Has.
func (mr *MockDBMockRecorder) Has(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockDB)(nil).Has), arg0)
}
// Iterator mocks base method.
func (m *MockDB) Iterator(arg0, arg1 []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Iterator", arg0, arg1)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Iterator indicates an expected call of Iterator.
func (mr *MockDBMockRecorder) Iterator(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Iterator", reflect.TypeOf((*MockDB)(nil).Iterator), arg0, arg1)
}
// NewBatch mocks base method.
func (m *MockDB) NewBatch() db.Batch {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBatch")
ret0, _ := ret[0].(db.Batch)
return ret0
}
// NewBatch indicates an expected call of NewBatch.
func (mr *MockDBMockRecorder) NewBatch() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatch", reflect.TypeOf((*MockDB)(nil).NewBatch))
}
// NewBatchWithSize mocks base method.
func (m *MockDB) NewBatchWithSize(arg0 int) db.Batch {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NewBatchWithSize", arg0)
ret0, _ := ret[0].(db.Batch)
return ret0
}
// NewBatchWithSize indicates an expected call of NewBatchWithSize.
func (mr *MockDBMockRecorder) NewBatchWithSize(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBatchWithSize", reflect.TypeOf((*MockDB)(nil).NewBatchWithSize), arg0)
}
// Print mocks base method.
func (m *MockDB) Print() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Print")
ret0, _ := ret[0].(error)
return ret0
}
// Print indicates an expected call of Print.
func (mr *MockDBMockRecorder) Print() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Print", reflect.TypeOf((*MockDB)(nil).Print))
}
// ReverseIterator mocks base method.
func (m *MockDB) ReverseIterator(arg0, arg1 []byte) (db.Iterator, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReverseIterator", arg0, arg1)
ret0, _ := ret[0].(db.Iterator)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReverseIterator indicates an expected call of ReverseIterator.
func (mr *MockDBMockRecorder) ReverseIterator(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReverseIterator", reflect.TypeOf((*MockDB)(nil).ReverseIterator), arg0, arg1)
}
// Set mocks base method.
func (m *MockDB) Set(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Set", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// Set indicates an expected call of Set.
func (mr *MockDBMockRecorder) Set(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Set", reflect.TypeOf((*MockDB)(nil).Set), arg0, arg1)
}
// SetSync mocks base method.
func (m *MockDB) SetSync(arg0, arg1 []byte) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SetSync", arg0, arg1)
ret0, _ := ret[0].(error)
return ret0
}
// SetSync indicates an expected call of SetSync.
func (mr *MockDBMockRecorder) SetSync(arg0, arg1 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSync", reflect.TypeOf((*MockDB)(nil).SetSync), arg0, arg1)
}
// Stats mocks base method.
func (m *MockDB) Stats() map[string]string {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Stats")
ret0, _ := ret[0].(map[string]string)
return ret0
}
// Stats indicates an expected call of Stats.
func (mr *MockDBMockRecorder) Stats() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stats", reflect.TypeOf((*MockDB)(nil).Stats))
}
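The generated mocks above are driven through gomock expectations. A minimal sketch (assuming a *testing.T named t and the errors package in scope; the pruning tests later in this diff use the same pattern to inject a SetSync failure):
ctrl := gomock.NewController(t)
dbMock := mock.NewMockDB(ctrl)
// Expect exactly one SetSync call with any arguments and make it fail.
dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New("db error")).Times(1)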

207
store/prefix/store.go Normal file

@ -0,0 +1,207 @@
package prefix
import (
"bytes"
"errors"
"io"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
var _ types.KVStore = Store{}
// Store is similar to cometbft/cometbft/libs/db/prefix_db;
// both give access only to a limited subset of the store,
// for convenience or safety.
type Store struct {
parent types.KVStore
prefix []byte
}
func NewStore(parent types.KVStore, prefix []byte) Store {
return Store{
parent: parent,
prefix: prefix,
}
}
func cloneAppend(bz, tail []byte) (res []byte) {
res = make([]byte, len(bz)+len(tail))
copy(res, bz)
copy(res[len(bz):], tail)
return
}
func (s Store) key(key []byte) (res []byte) {
if key == nil {
panic("nil key on Store")
}
res = cloneAppend(s.prefix, key)
return
}
// Implements Store
func (s Store) GetStoreType() types.StoreType {
return s.parent.GetStoreType()
}
// Implements CacheWrap
func (s Store) CacheWrap() types.CacheWrap {
return cachekv.NewStore(s)
}
// CacheWrapWithTrace implements the KVStore interface.
func (s Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
// Implements KVStore
func (s Store) Get(key []byte) []byte {
res := s.parent.Get(s.key(key))
return res
}
// Implements KVStore
func (s Store) Has(key []byte) bool {
return s.parent.Has(s.key(key))
}
// Implements KVStore
func (s Store) Set(key, value []byte) {
types.AssertValidKey(key)
types.AssertValidValue(value)
s.parent.Set(s.key(key), value)
}
// Implements KVStore
func (s Store) Delete(key []byte) {
s.parent.Delete(s.key(key))
}
// Implements KVStore
// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L106
func (s Store) Iterator(start, end []byte) types.Iterator {
newstart := cloneAppend(s.prefix, start)
var newend []byte
if end == nil {
newend = cpIncr(s.prefix)
} else {
newend = cloneAppend(s.prefix, end)
}
iter := s.parent.Iterator(newstart, newend)
return newPrefixIterator(s.prefix, start, end, iter)
}
// ReverseIterator implements KVStore
// Check https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db.go#L129
func (s Store) ReverseIterator(start, end []byte) types.Iterator {
newstart := cloneAppend(s.prefix, start)
var newend []byte
if end == nil {
newend = cpIncr(s.prefix)
} else {
newend = cloneAppend(s.prefix, end)
}
iter := s.parent.ReverseIterator(newstart, newend)
return newPrefixIterator(s.prefix, start, end, iter)
}
var _ types.Iterator = (*prefixIterator)(nil)
type prefixIterator struct {
prefix []byte
start []byte
end []byte
iter types.Iterator
valid bool
}
func newPrefixIterator(prefix, start, end []byte, parent types.Iterator) *prefixIterator {
return &prefixIterator{
prefix: prefix,
start: start,
end: end,
iter: parent,
valid: parent.Valid() && bytes.HasPrefix(parent.Key(), prefix),
}
}
// Implements Iterator
func (pi *prefixIterator) Domain() ([]byte, []byte) {
return pi.start, pi.end
}
// Implements Iterator
func (pi *prefixIterator) Valid() bool {
return pi.valid && pi.iter.Valid()
}
// Implements Iterator
func (pi *prefixIterator) Next() {
if !pi.valid {
panic("prefixIterator invalid, cannot call Next()")
}
if pi.iter.Next(); !pi.iter.Valid() || !bytes.HasPrefix(pi.iter.Key(), pi.prefix) {
// TODO: shouldn't pi be set to nil instead?
pi.valid = false
}
}
// Implements Iterator
func (pi *prefixIterator) Key() (key []byte) {
if !pi.valid {
panic("prefixIterator invalid, cannot call Key()")
}
key = pi.iter.Key()
key = stripPrefix(key, pi.prefix)
return
}
// Implements Iterator
func (pi *prefixIterator) Value() []byte {
if !pi.valid {
panic("prefixIterator invalid, cannot call Value()")
}
return pi.iter.Value()
}
// Implements Iterator
func (pi *prefixIterator) Close() error {
return pi.iter.Close()
}
// Error returns an error if the prefixIterator is invalid, as defined by the
// Valid method.
func (pi *prefixIterator) Error() error {
if !pi.Valid() {
return errors.New("invalid prefixIterator")
}
return nil
}
// copied from github.com/cometbft/cometbft/libs/db/prefix_db.go
func stripPrefix(key, prefix []byte) []byte {
if len(key) < len(prefix) || !bytes.Equal(key[:len(prefix)], prefix) {
panic("should not happen")
}
return key[len(prefix):]
}
// cpIncr wraps types.PrefixEndBytes
func cpIncr(bz []byte) []byte {
return types.PrefixEndBytes(bz)
}
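As a usage sketch (not part of this file; dbadapter and dbm are assumed imported as in the tests below), writes through a prefix store land under prefix||key in the parent, and iteration strips the prefix back off:
base := dbadapter.Store{DB: dbm.NewMemDB()}
ps := prefix.NewStore(base, []byte("acc/"))
ps.Set([]byte("alice"), []byte{1}) // parent now holds key "acc/alice"
it := ps.Iterator(nil, nil)
defer it.Close()
for ; it.Valid(); it.Next() {
_ = it.Key() // yields "alice"; the "acc/" prefix is stripped
}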

451
store/prefix/store_test.go Normal file

@ -0,0 +1,451 @@
package prefix
import (
"crypto/rand"
"testing"
dbm "github.com/cosmos/cosmos-db"
tiavl "github.com/cosmos/iavl"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/cachekv"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/gaskv"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/types"
"cosmossdk.io/store/wrapper"
)
// copied from iavl/store_test.go
var (
cacheSize = 100
)
func bz(s string) []byte { return []byte(s) }
type kvpair struct {
key []byte
value []byte
}
func genRandomKVPairs(t *testing.T) []kvpair {
t.Helper()
kvps := make([]kvpair, 20)
for i := 0; i < 20; i++ {
kvps[i].key = make([]byte, 32)
_, err := rand.Read(kvps[i].key)
require.NoError(t, err)
kvps[i].value = make([]byte, 32)
_, err = rand.Read(kvps[i].value)
require.NoError(t, err)
}
return kvps
}
func setRandomKVPairs(t *testing.T, store types.KVStore) []kvpair {
t.Helper()
kvps := genRandomKVPairs(t)
for _, kvp := range kvps {
store.Set(kvp.key, kvp.value)
}
return kvps
}
func testPrefixStore(t *testing.T, baseStore types.KVStore, prefix []byte) {
t.Helper()
prefixStore := NewStore(baseStore, prefix)
prefixPrefixStore := NewStore(prefixStore, []byte("prefix"))
require.Panics(t, func() { prefixStore.Get(nil) })
require.Panics(t, func() { prefixStore.Set(nil, []byte{}) })
kvps := setRandomKVPairs(t, prefixPrefixStore)
for i := 0; i < 20; i++ {
key := kvps[i].key
value := kvps[i].value
require.True(t, prefixPrefixStore.Has(key))
require.Equal(t, value, prefixPrefixStore.Get(key))
key = append([]byte("prefix"), key...)
require.True(t, prefixStore.Has(key))
require.Equal(t, value, prefixStore.Get(key))
key = append(prefix, key...)
require.True(t, baseStore.Has(key))
require.Equal(t, value, baseStore.Get(key))
key = kvps[i].key
prefixPrefixStore.Delete(key)
require.False(t, prefixPrefixStore.Has(key))
require.Nil(t, prefixPrefixStore.Get(key))
key = append([]byte("prefix"), key...)
require.False(t, prefixStore.Has(key))
require.Nil(t, prefixStore.Get(key))
key = append(prefix, key...)
require.False(t, baseStore.Has(key))
require.Nil(t, baseStore.Get(key))
}
}
func TestIAVLStorePrefix(t *testing.T) {
db := wrapper.NewDBWrapper(dbm.NewMemDB())
tree := tiavl.NewMutableTree(db, cacheSize, false, log.NewNopLogger())
iavlStore := iavl.UnsafeNewStore(tree)
testPrefixStore(t, iavlStore, []byte("test"))
}
func TestPrefixKVStoreNoNilSet(t *testing.T) {
meter := types.NewGasMeter(100000000)
mem := dbadapter.Store{DB: dbm.NewMemDB()}
gasStore := gaskv.NewStore(mem, meter, types.KVGasConfig())
require.Panics(t, func() { gasStore.Set([]byte("key"), nil) }, "setting a nil value should panic")
}
func TestPrefixStoreIterate(t *testing.T) {
db := dbm.NewMemDB()
baseStore := dbadapter.Store{DB: db}
prefix := []byte("test")
prefixStore := NewStore(baseStore, prefix)
setRandomKVPairs(t, prefixStore)
bIter := types.KVStorePrefixIterator(baseStore, prefix)
pIter := types.KVStorePrefixIterator(prefixStore, nil)
for bIter.Valid() && pIter.Valid() {
require.Equal(t, bIter.Key(), append(prefix, pIter.Key()...))
require.Equal(t, bIter.Value(), pIter.Value())
bIter.Next()
pIter.Next()
}
bIter.Close()
pIter.Close()
}
func incFirstByte(bz []byte) {
bz[0]++
}
func TestCloneAppend(t *testing.T) {
kvps := genRandomKVPairs(t)
for _, kvp := range kvps {
bz := cloneAppend(kvp.key, kvp.value)
require.Equal(t, bz, append(kvp.key, kvp.value...))
incFirstByte(bz)
require.NotEqual(t, bz, append(kvp.key, kvp.value...))
bz = cloneAppend(kvp.key, kvp.value)
incFirstByte(kvp.key)
require.NotEqual(t, bz, append(kvp.key, kvp.value...))
bz = cloneAppend(kvp.key, kvp.value)
incFirstByte(kvp.value)
require.NotEqual(t, bz, append(kvp.key, kvp.value...))
}
}
func TestPrefixStoreIteratorEdgeCase(t *testing.T) {
db := dbm.NewMemDB()
baseStore := dbadapter.Store{DB: db}
// overflow in cpIncr
prefix := []byte{0xAA, 0xFF, 0xFF}
prefixStore := NewStore(baseStore, prefix)
// ascending order
baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{})
baseStore.Set([]byte{0xAB}, []byte{})
baseStore.Set([]byte{0xAB, 0x00}, []byte{})
baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{})
iter := prefixStore.Iterator(nil, nil)
checkDomain(t, iter, nil, nil)
checkItem(t, iter, []byte{}, bz(""))
checkNext(t, iter, true)
checkItem(t, iter, []byte{0x00}, bz(""))
checkNext(t, iter, false)
checkInvalid(t, iter)
iter.Close()
}
func TestPrefixStoreReverseIteratorEdgeCase(t *testing.T) {
db := dbm.NewMemDB()
baseStore := dbadapter.Store{DB: db}
// overflow in cpIncr
prefix := []byte{0xAA, 0xFF, 0xFF}
prefixStore := NewStore(baseStore, prefix)
// descending order
baseStore.Set([]byte{0xAB, 0x00, 0x00}, []byte{})
baseStore.Set([]byte{0xAB, 0x00}, []byte{})
baseStore.Set([]byte{0xAB}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFF, 0x00}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFF}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFE, 0x00}, []byte{})
baseStore.Set([]byte{0xAA, 0xFF, 0xFE}, []byte{})
iter := prefixStore.ReverseIterator(nil, nil)
checkDomain(t, iter, nil, nil)
checkItem(t, iter, []byte{0x00}, bz(""))
checkNext(t, iter, true)
checkItem(t, iter, []byte{}, bz(""))
checkNext(t, iter, false)
checkInvalid(t, iter)
iter.Close()
db = dbm.NewMemDB()
baseStore = dbadapter.Store{DB: db}
// underflow in cpDecr
prefix = []byte{0xAA, 0x00, 0x00}
prefixStore = NewStore(baseStore, prefix)
baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00, 0x00}, []byte{})
baseStore.Set([]byte{0xAB, 0x00, 0x01, 0x00}, []byte{})
baseStore.Set([]byte{0xAB, 0x00, 0x01}, []byte{})
baseStore.Set([]byte{0xAA, 0x00, 0x00, 0x00}, []byte{})
baseStore.Set([]byte{0xAA, 0x00, 0x00}, []byte{})
baseStore.Set([]byte{0xA9, 0xFF, 0xFF, 0x00}, []byte{})
baseStore.Set([]byte{0xA9, 0xFF, 0xFF}, []byte{})
iter = prefixStore.ReverseIterator(nil, nil)
checkDomain(t, iter, nil, nil)
checkItem(t, iter, []byte{0x00}, bz(""))
checkNext(t, iter, true)
checkItem(t, iter, []byte{}, bz(""))
checkNext(t, iter, false)
checkInvalid(t, iter)
iter.Close()
}
// Tests below are ported from https://github.com/cometbft/cometbft/blob/master/libs/db/prefix_db_test.go
func mockStoreWithStuff() types.KVStore {
db := dbm.NewMemDB()
store := dbadapter.Store{DB: db}
// Under "key" prefix
store.Set(bz("key"), bz("value"))
store.Set(bz("key1"), bz("value1"))
store.Set(bz("key2"), bz("value2"))
store.Set(bz("key3"), bz("value3"))
store.Set(bz("something"), bz("else"))
store.Set(bz("k"), bz("val"))
store.Set(bz("ke"), bz("valu"))
store.Set(bz("kee"), bz("valuu"))
return store
}
func checkValue(t *testing.T, store types.KVStore, key, expected []byte) {
t.Helper()
bz := store.Get(key)
require.Equal(t, expected, bz)
}
func checkValid(t *testing.T, itr types.Iterator, expected bool) {
t.Helper()
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkNext(t *testing.T, itr types.Iterator, expected bool) {
t.Helper()
itr.Next()
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkDomain(t *testing.T, itr types.Iterator, start, end []byte) {
t.Helper()
ds, de := itr.Domain()
require.Equal(t, start, ds)
require.Equal(t, end, de)
}
func checkItem(t *testing.T, itr types.Iterator, key, value []byte) {
t.Helper()
require.Exactly(t, key, itr.Key())
require.Exactly(t, value, itr.Value())
}
func checkInvalid(t *testing.T, itr types.Iterator) {
t.Helper()
checkValid(t, itr, false)
checkKeyPanics(t, itr)
checkValuePanics(t, itr)
checkNextPanics(t, itr)
}
func checkKeyPanics(t *testing.T, itr types.Iterator) {
t.Helper()
require.Panics(t, func() { itr.Key() })
}
func checkValuePanics(t *testing.T, itr types.Iterator) {
t.Helper()
require.Panics(t, func() { itr.Value() })
}
func checkNextPanics(t *testing.T, itr types.Iterator) {
t.Helper()
require.Panics(t, func() { itr.Next() })
}
func TestPrefixDBSimple(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
checkValue(t, pstore, bz("key"), nil)
checkValue(t, pstore, bz(""), bz("value"))
checkValue(t, pstore, bz("key1"), nil)
checkValue(t, pstore, bz("1"), bz("value1"))
checkValue(t, pstore, bz("key2"), nil)
checkValue(t, pstore, bz("2"), bz("value2"))
checkValue(t, pstore, bz("key3"), nil)
checkValue(t, pstore, bz("3"), bz("value3"))
checkValue(t, pstore, bz("something"), nil)
checkValue(t, pstore, bz("k"), nil)
checkValue(t, pstore, bz("ke"), nil)
checkValue(t, pstore, bz("kee"), nil)
}
func TestPrefixDBIterator1(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.Iterator(nil, nil)
checkDomain(t, itr, nil, nil)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator2(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.Iterator(nil, bz(""))
checkDomain(t, itr, nil, bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator3(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.Iterator(bz(""), nil)
checkDomain(t, itr, bz(""), nil)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator4(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.Iterator(bz(""), bz(""))
checkDomain(t, itr, bz(""), bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator1(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.ReverseIterator(nil, nil)
checkDomain(t, itr, nil, nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator2(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.ReverseIterator(bz(""), nil)
checkDomain(t, itr, bz(""), nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator3(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.ReverseIterator(nil, bz(""))
checkDomain(t, itr, nil, bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator4(t *testing.T) {
store := mockStoreWithStuff()
pstore := NewStore(store, bz("key"))
itr := pstore.ReverseIterator(bz(""), bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestCacheWraps(t *testing.T) {
db := dbm.NewMemDB()
store := dbadapter.Store{DB: db}
cacheWrapper := store.CacheWrap()
require.IsType(t, &cachekv.Store{}, cacheWrapper)
cacheWrappedWithTrace := store.CacheWrapWithTrace(nil, nil)
require.IsType(t, &cachekv.Store{}, cacheWrappedWithTrace)
}

30
store/pruning/README.md Normal file

@ -0,0 +1,30 @@
# Pruning
## Overview
Pruning is the mechanism for deleting old application heights from disk. Depending on the use case,
nodes may require different pruning strategies. For example, archive nodes must keep all
states and prune nothing, while a regular validator node may want to keep only the latest 100 heights for performance reasons.
## Strategies
The strategies are configured in `app.toml`, with the format `pruning = "<strategy>"` where the options are:
* `default`: only the last 362,880 states (approximately 3.5 weeks' worth of state) are kept; pruning at 10-block intervals
* `nothing`: all historic states will be saved, nothing will be deleted (i.e. archiving node)
* `everything`: the 2 latest states are kept; pruning at 10-block intervals.
* `custom`: allows pruning options to be manually specified through `pruning-keep-recent` and `pruning-interval`
If no strategy is given to the BaseApp, `nothing` is selected. However, validation is performed at the CLI layer to require that these always be set in the config file.
## Custom Pruning
These are applied if and only if the pruning strategy is custom (a programmatic sketch follows the list):
* `pruning-keep-recent`: N means to keep all of the last N states
* `pruning-interval`: N means to delete old states from disk every Nth block.
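Programmatically, the same strategies map onto `PruningOptions` from `store/pruning/types`; a minimal sketch of building and validating a custom configuration (the numbers are illustrative):
opts := types.NewCustomPruningOptions(100, 15) // keep the last 100 states, prune every 15th block
if err := opts.Validate(); err != nil {
panic(err) // e.g. interval below 10 or keep-recent below 2
}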
## Relationship to State Sync Snapshots
Snapshot settings are optional. However, if set, they affect how pruning is done by
persisting the heights that are multiples of `state-sync.snapshot-interval` until the snapshot is complete. See the "Relationship to Pruning" section in `snapshots/README.md` for more details.


@ -0,0 +1,8 @@
package pruning
var (
PruneSnapshotHeightsKey = pruneSnapshotHeightsKey
Int64SliceToBytes = int64SliceToBytes
LoadPruningSnapshotHeights = loadPruningSnapshotHeights
)

191
store/pruning/manager.go Normal file

@ -0,0 +1,191 @@
package pruning
import (
"encoding/binary"
"fmt"
"sort"
"sync"
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/log"
"cosmossdk.io/store/pruning/types"
)
// Manager is an abstraction to handle the logic needed for
// determining when to prune old heights of the store
// based on the strategy described by the pruning options.
type Manager struct {
db dbm.DB
logger log.Logger
opts types.PruningOptions
snapshotInterval uint64
// Snapshots are taken in a separate goroutine from the regular execution
// and can be delivered asynchronously via HandleSnapshotHeight.
// Therefore, we sync access to pruneSnapshotHeights with this mutex.
pruneSnapshotHeightsMx sync.RWMutex
// These are the heights that are multiples of snapshotInterval and kept for state sync snapshots.
// The heights are added to be pruned when a snapshot is complete.
pruneSnapshotHeights []int64
}
// NegativeHeightsError is returned when a negative height is provided to the manager.
type NegativeHeightsError struct {
Height int64
}
var _ error = &NegativeHeightsError{}
func (e *NegativeHeightsError) Error() string {
return fmt.Sprintf("failed to get pruned heights: %d", e.Height)
}
var pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights")
// NewManager returns a new Manager with the given db and logger.
// The returned manager uses a pruning strategy of "nothing", which
// keeps all heights. Users of the Manager may change the strategy
// by calling SetOptions.
func NewManager(db dbm.DB, logger log.Logger) *Manager {
return &Manager{
db: db,
logger: logger,
opts: types.NewPruningOptions(types.PruningNothing),
pruneSnapshotHeights: []int64{0},
}
}
// SetOptions sets the pruning strategy on the manager.
func (m *Manager) SetOptions(opts types.PruningOptions) {
m.opts = opts
}
// GetOptions fetches the pruning strategy from the manager.
func (m *Manager) GetOptions() types.PruningOptions {
return m.opts
}
// HandleSnapshotHeight persists the snapshot height to be pruned at the next appropriate
// height defined by the pruning strategy. It flushes the update to disk and panics if the flush fails.
// The input height must be greater than 0, and the pruning strategy must not be set to pruning nothing.
// If either of these conditions is not met, this function does nothing.
func (m *Manager) HandleSnapshotHeight(height int64) {
if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 {
return
}
m.pruneSnapshotHeightsMx.Lock()
defer m.pruneSnapshotHeightsMx.Unlock()
m.logger.Debug("HandleSnapshotHeight", "height", height)
m.pruneSnapshotHeights = append(m.pruneSnapshotHeights, height)
sort.Slice(m.pruneSnapshotHeights, func(i, j int) bool { return m.pruneSnapshotHeights[i] < m.pruneSnapshotHeights[j] })
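// Collapse the leading run of consecutive snapshot heights (each exactly
// snapshotInterval apart); only the last height of the run still bounds
// pruning in GetPruningHeight, so the earlier ones can be dropped.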
k := 1
for ; k < len(m.pruneSnapshotHeights); k++ {
if m.pruneSnapshotHeights[k] != m.pruneSnapshotHeights[k-1]+int64(m.snapshotInterval) {
break
}
}
m.pruneSnapshotHeights = m.pruneSnapshotHeights[k-1:]
// flush the updates to disk so that they are not lost if a crash happens.
if err := m.db.SetSync(pruneSnapshotHeightsKey, int64SliceToBytes(m.pruneSnapshotHeights)); err != nil {
panic(err)
}
}
// SetSnapshotInterval sets the interval at which the snapshots are taken.
func (m *Manager) SetSnapshotInterval(snapshotInterval uint64) {
m.snapshotInterval = snapshotInterval
}
// GetPruningHeight returns the height up to which the store can prune, if pruning is possible at the given height.
func (m *Manager) GetPruningHeight(height int64) int64 {
if m.opts.GetPruningStrategy() == types.PruningNothing {
return 0
}
if m.opts.Interval <= 0 {
return 0
}
if height%int64(m.opts.Interval) != 0 || height <= int64(m.opts.KeepRecent) {
return 0
}
// Consider the snapshot height
pruneHeight := height - 1 - int64(m.opts.KeepRecent) // keep at least the current height
m.pruneSnapshotHeightsMx.RLock()
defer m.pruneSnapshotHeightsMx.RUnlock()
// snapshotInterval is zero, indicating that all heights can be pruned
if m.snapshotInterval <= 0 {
return pruneHeight
}
if len(m.pruneSnapshotHeights) == 0 { // the length should be greater than zero
return 0
}
// the snapshot at `m.pruneSnapshotHeights[0]` has already been taken,
// so we can prune up to `m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1`
snHeight := m.pruneSnapshotHeights[0] + int64(m.snapshotInterval) - 1
if snHeight < pruneHeight {
return snHeight
}
return pruneHeight
}
// LoadSnapshotHeights loads the snapshot heights from the database as part of crash recovery.
func (m *Manager) LoadSnapshotHeights(db dbm.DB) error {
if m.opts.GetPruningStrategy() == types.PruningNothing {
return nil
}
loadedPruneSnapshotHeights, err := loadPruningSnapshotHeights(db)
if err != nil {
return err
}
if len(loadedPruneSnapshotHeights) > 0 {
m.pruneSnapshotHeightsMx.Lock()
defer m.pruneSnapshotHeightsMx.Unlock()
m.pruneSnapshotHeights = loadedPruneSnapshotHeights
}
return nil
}
func loadPruningSnapshotHeights(db dbm.DB) ([]int64, error) {
bz, err := db.Get(pruneSnapshotHeightsKey)
if err != nil {
return nil, fmt.Errorf("failed to get post-snapshot pruned heights: %w", err)
}
if len(bz) == 0 {
return []int64{}, nil
}
pruneSnapshotHeights := make([]int64, len(bz)/8)
i, offset := 0, 0
for offset < len(bz) {
h := int64(binary.BigEndian.Uint64(bz[offset : offset+8]))
if h < 0 {
return nil, &NegativeHeightsError{Height: h}
}
pruneSnapshotHeights[i] = h
i++
offset += 8
}
return pruneSnapshotHeights, nil
}
func int64SliceToBytes(slice []int64) []byte {
bz := make([]byte, 0, len(slice)*8)
for _, ph := range slice {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(ph))
bz = append(bz, buf...)
}
return bz
}
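A minimal usage sketch (assuming dbm is github.com/cosmos/cosmos-db and types is the pruning types package; the tests that follow exercise the same flow):
m := pruning.NewManager(dbm.NewMemDB(), log.NewNopLogger())
m.SetOptions(types.NewPruningOptions(types.PruningEverything)) // keep 2, interval 10
m.SetSnapshotInterval(10)
m.HandleSnapshotHeight(10)        // a snapshot at height 10 has completed
pruneTo := m.GetPruningHeight(20) // heights up to pruneTo may now be deleted
_ = pruneTo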


@ -0,0 +1,303 @@
package pruning_test
import (
"errors"
"fmt"
"testing"
db "github.com/cosmos/cosmos-db"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/mock"
"cosmossdk.io/store/pruning"
"cosmossdk.io/store/pruning/types"
)
const dbErr = "db error"
func TestNewManager(t *testing.T) {
manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
require.NotNil(t, manager)
require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy())
}
func TestStrategies(t *testing.T) {
testcases := map[string]struct {
strategy types.PruningOptions
snapshotInterval uint64
strategyToAssert types.PruningStrategy
isValid bool
}{
"prune nothing - no snapshot": {
strategy: types.NewPruningOptions(types.PruningNothing),
strategyToAssert: types.PruningNothing,
},
"prune nothing - snapshot": {
strategy: types.NewPruningOptions(types.PruningNothing),
strategyToAssert: types.PruningNothing,
snapshotInterval: 100,
},
"prune default - no snapshot": {
strategy: types.NewPruningOptions(types.PruningDefault),
strategyToAssert: types.PruningDefault,
},
"prune default - snapshot": {
strategy: types.NewPruningOptions(types.PruningDefault),
strategyToAssert: types.PruningDefault,
snapshotInterval: 100,
},
"prune everything - no snapshot": {
strategy: types.NewPruningOptions(types.PruningEverything),
strategyToAssert: types.PruningEverything,
},
"prune everything - snapshot": {
strategy: types.NewPruningOptions(types.PruningEverything),
strategyToAssert: types.PruningEverything,
snapshotInterval: 100,
},
"custom 100-10-15": {
strategy: types.NewCustomPruningOptions(100, 15),
snapshotInterval: 10,
strategyToAssert: types.PruningCustom,
},
"custom 10-10-15": {
strategy: types.NewCustomPruningOptions(10, 15),
snapshotInterval: 10,
strategyToAssert: types.PruningCustom,
},
"custom 100-0-15": {
strategy: types.NewCustomPruningOptions(100, 15),
snapshotInterval: 0,
strategyToAssert: types.PruningCustom,
},
}
for name, tc := range testcases {
tc := tc // Capture the range variable so the parallel subtest does not share it.
t.Run(name, func(t *testing.T) {
t.Parallel()
manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
require.NotNil(t, manager)
curStrategy := tc.strategy
manager.SetSnapshotInterval(tc.snapshotInterval)
pruneStrategy := curStrategy.GetPruningStrategy()
require.Equal(t, tc.strategyToAssert, pruneStrategy)
// Validate strategy parameters
switch pruneStrategy {
case types.PruningDefault:
require.Equal(t, uint64(362880), curStrategy.KeepRecent)
require.Equal(t, uint64(10), curStrategy.Interval)
case types.PruningNothing:
require.Equal(t, uint64(0), curStrategy.KeepRecent)
require.Equal(t, uint64(0), curStrategy.Interval)
case types.PruningEverything:
require.Equal(t, uint64(2), curStrategy.KeepRecent)
require.Equal(t, uint64(10), curStrategy.Interval)
default:
// custom strategies have no fixed parameters to assert here
}
manager.SetOptions(curStrategy)
require.Equal(t, tc.strategy, manager.GetOptions())
curKeepRecent := curStrategy.KeepRecent
snHeight := int64(tc.snapshotInterval - 1)
for curHeight := int64(0); curHeight < 110000; curHeight++ {
if tc.snapshotInterval != 0 {
if curHeight > int64(tc.snapshotInterval) && curHeight%int64(tc.snapshotInterval) == int64(tc.snapshotInterval)-1 {
manager.HandleSnapshotHeight(curHeight - int64(tc.snapshotInterval) + 1)
snHeight = curHeight
}
}
pruningHeightActual := manager.GetPruningHeight(curHeight)
curHeightStr := fmt.Sprintf("height: %d", curHeight)
switch curStrategy.GetPruningStrategy() {
case types.PruningNothing:
require.Equal(t, int64(0), pruningHeightActual, curHeightStr)
default:
if curHeight > int64(curKeepRecent) && curHeight%int64(curStrategy.Interval) == 0 {
pruningHeightExpected := curHeight - int64(curKeepRecent) - 1
if tc.snapshotInterval > 0 && snHeight < pruningHeightExpected {
pruningHeightExpected = snHeight
}
require.Equal(t, pruningHeightExpected, pruningHeightActual, curHeightStr)
} else {
require.Equal(t, int64(0), pruningHeightActual, curHeightStr)
}
}
}
})
}
}
func TestPruningHeight_Inputs(t *testing.T) {
keepRecent := int64(types.NewPruningOptions(types.PruningEverything).KeepRecent)
interval := int64(types.NewPruningOptions(types.PruningEverything).Interval)
testcases := map[string]struct {
height int64
expectedResult int64
strategy types.PruningStrategy
}{
"currentHeight is negative - prune everything - invalid currentHeight": {
-1,
0,
types.PruningEverything,
},
"currentHeight is zero - prune everything - invalid currentHeight": {
0,
0,
types.PruningEverything,
},
"currentHeight is positive but within keep recent- prune everything - not kept": {
keepRecent,
0,
types.PruningEverything,
},
"currentHeight is positive and equal to keep recent+1 - no kept": {
keepRecent + 1,
0,
types.PruningEverything,
},
"currentHeight is positive and greater than keep recent+1 but not multiple of interval - no kept": {
keepRecent + 2,
0,
types.PruningEverything,
},
"currentHeight is positive and greater than keep recent+1 and multiple of interval - kept": {
interval,
interval - keepRecent - 1,
types.PruningEverything,
},
"pruning nothing, currentHeight is positive and greater than keep recent - not kept": {
interval,
0,
types.PruningNothing,
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
require.NotNil(t, manager)
manager.SetOptions(types.NewPruningOptions(tc.strategy))
pruningHeightActual := manager.GetPruningHeight(tc.height)
require.Equal(t, tc.expectedResult, pruningHeightActual)
})
}
}
func TestHandleSnapshotHeight_DbErr_Panic(t *testing.T) {
ctrl := gomock.NewController(t)
// Setup
dbMock := mock.NewMockDB(ctrl)
dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1)
manager := pruning.NewManager(dbMock, log.NewNopLogger())
manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
require.NotNil(t, manager)
defer func() {
if r := recover(); r == nil {
t.Fail()
}
}()
manager.HandleSnapshotHeight(10)
}
func TestHandleSnapshotHeight_LoadFromDisk(t *testing.T) {
snapshotInterval := uint64(10)
// Setup
db := db.NewMemDB()
manager := pruning.NewManager(db, log.NewNopLogger())
require.NotNil(t, manager)
manager.SetOptions(types.NewPruningOptions(types.PruningEverything))
manager.SetSnapshotInterval(snapshotInterval)
expected := 0
for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ {
snapshotHeightStr := fmt.Sprintf("snapshot height: %d", snapshotHeight)
if snapshotHeight > int64(snapshotInterval) && snapshotHeight%int64(snapshotInterval) == 1 {
// Test flush
manager.HandleSnapshotHeight(snapshotHeight - 1)
expected = 1
}
loadedSnapshotHeights, err := pruning.LoadPruningSnapshotHeights(db)
require.NoError(t, err)
require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)
// Test load back
err = manager.LoadSnapshotHeights(db)
require.NoError(t, err)
loadedSnapshotHeights, err = pruning.LoadPruningSnapshotHeights(db)
require.NoError(t, err)
require.Equal(t, expected, len(loadedSnapshotHeights), snapshotHeightStr)
}
}
func TestLoadPruningSnapshotHeights(t *testing.T) {
var (
manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
err error
)
require.NotNil(t, manager)
// must not be PruningNothing
manager.SetOptions(types.NewPruningOptions(types.PruningDefault))
testcases := map[string]struct {
getFlushedPruningSnapshotHeights func() []int64
expectedResult error
}{
"negative snapshotPruningHeight - error": {
getFlushedPruningSnapshotHeights: func() []int64 {
return []int64{5, -2, 3}
},
expectedResult: &pruning.NegativeHeightsError{Height: -2},
},
"non-negative - success": {
getFlushedPruningSnapshotHeights: func() []int64 {
return []int64{5, 0, 3}
},
},
}
for name, tc := range testcases {
t.Run(name, func(t *testing.T) {
db := db.NewMemDB()
if tc.getFlushedPruningSnapshotHeights != nil {
err = db.Set(pruning.PruneSnapshotHeightsKey, pruning.Int64SliceToBytes(tc.getFlushedPruningSnapshotHeights()))
require.NoError(t, err)
}
err = manager.LoadSnapshotHeights(db)
require.Equal(t, tc.expectedResult, err)
})
}
}
func TestLoadSnapshotHeights_PruneNothing(t *testing.T) {
manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger())
require.NotNil(t, manager)
manager.SetOptions(types.NewPruningOptions(types.PruningNothing))
require.Nil(t, manager.LoadSnapshotHeights(db.NewMemDB()))
}


@ -0,0 +1,130 @@
package types
import (
"errors"
"fmt"
)
// PruningOptions defines the pruning strategy used when determining which
// heights are removed from disk when committing state.
type PruningOptions struct {
// KeepRecent defines how many recent heights to keep on disk.
KeepRecent uint64
// Interval defines when the pruned heights are removed from disk.
Interval uint64
// Strategy defines the kind of pruning strategy. See below for more information on each.
Strategy PruningStrategy
}
type PruningStrategy int
// Pruning option string constants
const (
PruningOptionDefault = "default"
PruningOptionEverything = "everything"
PruningOptionNothing = "nothing"
PruningOptionCustom = "custom"
)
const (
// PruningDefault defines a pruning strategy where the last 362880 heights are
// kept (approximately 3.5 weeks' worth of state, assuming a typical block time
// of 6s), and to-be-pruned heights are pruned at every 10th height.
// If these values do not match the application's requirements, use the "custom" option.
PruningDefault PruningStrategy = iota
// PruningEverything defines a pruning strategy where all committed heights are
// deleted, storing only the current height and last 2 states. To-be pruned heights are
// pruned at every 10th height.
PruningEverything
// PruningNothing defines a pruning strategy where all heights are kept on disk.
// This is the only strategy where KeepEvery=1 is allowed with state-sync snapshots disabled.
PruningNothing
// PruningCustom defines a pruning strategy where the user specifies the pruning.
PruningCustom
// PruningUndefined defines an undefined pruning strategy. It is to be returned by stores that do not support pruning.
PruningUndefined
)
const (
pruneEverythingKeepRecent = 2
pruneEverythingInterval = 10
)
var (
ErrPruningIntervalZero = errors.New("'pruning-interval' must not be 0. If you want to disable pruning, select pruning = \"nothing\"")
ErrPruningIntervalTooSmall = fmt.Errorf("'pruning-interval' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingInterval)
ErrPruningKeepRecentTooSmall = fmt.Errorf("'pruning-keep-recent' must not be less than %d. For the most aggressive pruning, select pruning = \"everything\"", pruneEverythingKeepRecent)
)
func NewPruningOptions(pruningStrategy PruningStrategy) PruningOptions {
switch pruningStrategy {
case PruningDefault:
return PruningOptions{
KeepRecent: 362880,
Interval: 10,
Strategy: PruningDefault,
}
case PruningEverything:
return PruningOptions{
KeepRecent: pruneEverythingKeepRecent,
Interval: pruneEverythingInterval,
Strategy: PruningEverything,
}
case PruningNothing:
return PruningOptions{
KeepRecent: 0,
Interval: 0,
Strategy: PruningNothing,
}
default:
return PruningOptions{
Strategy: PruningCustom,
}
}
}
func NewCustomPruningOptions(keepRecent, interval uint64) PruningOptions {
return PruningOptions{
KeepRecent: keepRecent,
Interval: interval,
Strategy: PruningCustom,
}
}
func (po PruningOptions) GetPruningStrategy() PruningStrategy {
return po.Strategy
}
func (po PruningOptions) Validate() error {
if po.Strategy == PruningNothing {
return nil
}
if po.Interval == 0 {
return ErrPruningIntervalZero
}
if po.Interval < pruneEverythingInterval {
return ErrPruningIntervalTooSmall
}
if po.KeepRecent < pruneEverythingKeepRecent {
return ErrPruningKeepRecentTooSmall
}
return nil
}
func NewPruningOptionsFromString(strategy string) PruningOptions {
switch strategy {
case PruningOptionEverything:
return NewPruningOptions(PruningEverything)
case PruningOptionNothing:
return NewPruningOptions(PruningNothing)
case PruningOptionDefault:
return NewPruningOptions(PruningDefault)
default:
return NewPruningOptions(PruningDefault)
}
}
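Note that the switch above has no case for PruningOptionCustom, so any unrecognized string, including "custom", falls back to the default options; custom settings must be built explicitly:
opts := NewPruningOptionsFromString("custom") // yields NewPruningOptions(PruningDefault)
custom := NewCustomPruningOptions(100, 15)    // the explicit route for custom pruning
_, _ = opts, custom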


@ -0,0 +1,65 @@
package types
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestPruningOptions_Validate(t *testing.T) {
testCases := []struct {
opts PruningOptions
expectErr error
}{
{NewPruningOptions(PruningDefault), nil},
{NewPruningOptions(PruningEverything), nil},
{NewPruningOptions(PruningNothing), nil},
{NewPruningOptions(PruningCustom), ErrPruningIntervalZero},
{NewCustomPruningOptions(2, 10), nil},
{NewCustomPruningOptions(100, 15), nil},
{NewCustomPruningOptions(1, 10), ErrPruningKeepRecentTooSmall},
{NewCustomPruningOptions(2, 9), ErrPruningIntervalTooSmall},
{NewCustomPruningOptions(2, 0), ErrPruningIntervalZero},
{NewCustomPruningOptions(2, 0), ErrPruningIntervalZero},
}
for _, tc := range testCases {
err := tc.opts.Validate()
require.Equal(t, tc.expectErr, err, "options: %v, err: %s", tc.opts, err)
}
}
func TestPruningOptions_GetStrategy(t *testing.T) {
testCases := []struct {
opts PruningOptions
expectedStrategy PruningStrategy
}{
{NewPruningOptions(PruningDefault), PruningDefault},
{NewPruningOptions(PruningEverything), PruningEverything},
{NewPruningOptions(PruningNothing), PruningNothing},
{NewPruningOptions(PruningCustom), PruningCustom},
{NewCustomPruningOptions(2, 10), PruningCustom},
}
for _, tc := range testCases {
actualStrategy := tc.opts.GetPruningStrategy()
require.Equal(t, tc.expectedStrategy, actualStrategy)
}
}
func TestNewPruningOptionsFromString(t *testing.T) {
testCases := []struct {
optString string
expect PruningOptions
}{
{PruningOptionDefault, NewPruningOptions(PruningDefault)},
{PruningOptionEverything, NewPruningOptions(PruningEverything)},
{PruningOptionNothing, NewPruningOptions(PruningNothing)},
{"invalid", NewPruningOptions(PruningDefault)},
}
for _, tc := range testCases {
actual := NewPruningOptionsFromString(tc.optString)
require.Equal(t, tc.expect, actual)
}
}

29
store/reexport.go Normal file

@ -0,0 +1,29 @@
package store
import (
"cosmossdk.io/store/types"
)
// Import cosmos-sdk/types/store.go for convenience.
type (
Store = types.Store
Committer = types.Committer
CommitStore = types.CommitStore
MultiStore = types.MultiStore
CacheMultiStore = types.CacheMultiStore
CommitMultiStore = types.CommitMultiStore
KVStore = types.KVStore
Iterator = types.Iterator
CacheKVStore = types.CacheKVStore
CommitKVStore = types.CommitKVStore
CacheWrapper = types.CacheWrapper
CacheWrap = types.CacheWrap
CommitID = types.CommitID
Key = types.StoreKey
Type = types.StoreType
Queryable = types.Queryable
TraceContext = types.TraceContext
Gas = types.Gas
GasMeter = types.GasMeter
GasConfig = types.GasConfig
)


@ -0,0 +1,49 @@
package rootmulti
import (
"cosmossdk.io/store/dbadapter"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
var commithash = []byte("FAKE_HASH")
var (
_ types.KVStore = (*commitDBStoreAdapter)(nil)
_ types.Committer = (*commitDBStoreAdapter)(nil)
)
//----------------------------------------
// commitDBStoreAdapter should only be used for simulation/debugging,
// as it doesn't compute any commit hash and cannot load older state.
// It wraps dbm.DB with an implementation of KVStore.
type commitDBStoreAdapter struct {
dbadapter.Store
}
func (cdsa commitDBStoreAdapter) Commit() types.CommitID {
return types.CommitID{
Version: -1,
Hash: commithash,
}
}
func (cdsa commitDBStoreAdapter) LastCommitID() types.CommitID {
return types.CommitID{
Version: -1,
Hash: commithash,
}
}
func (cdsa commitDBStoreAdapter) WorkingHash() []byte {
return commithash
}
func (cdsa commitDBStoreAdapter) SetPruning(_ pruningtypes.PruningOptions) {}
// GetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (cdsa commitDBStoreAdapter) GetPruning() pruningtypes.PruningOptions {
return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
}

27
store/rootmulti/proof.go Normal file

@ -0,0 +1,27 @@
package rootmulti
import (
"github.com/cometbft/cometbft/crypto/merkle"
storetypes "cosmossdk.io/store/types"
)
// RequireProof returns whether proof is required for the subpath.
func RequireProof(subpath string) bool {
// XXX: create a better convention.
// Currently, only when query subpath is "/key", will proof be included in
// response. If there are some changes about proof building in iavlstore.go,
// we must change code here to keep consistency with iavlStore#Query.
return subpath == "/key"
}
//-----------------------------------------------------------------------------
// XXX: This should be managed by the rootMultiStore which may want to register
// more proof ops?
func DefaultProofRuntime() (prt *merkle.ProofRuntime) {
prt = merkle.NewProofRuntime()
prt.RegisterOpDecoder(storetypes.ProofOpIAVLCommitment, storetypes.CommitmentOpDecoder)
prt.RegisterOpDecoder(storetypes.ProofOpSimpleMerkleCommitment, storetypes.CommitmentOpDecoder)
return
}
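A sketch of how the runtime is used (res and cid here are placeholders for a store query response made with Prove: true and its commit ID; the tests that follow do exactly this):
prt := DefaultProofRuntime()
// Verify that the proof ops bind "MYVALUE" to key "MYKEY" under the commit hash.
err := prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE"))
_ = err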


@ -0,0 +1,152 @@
package rootmulti
import (
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/types"
)
func TestVerifyIAVLStoreQueryProof(t *testing.T) {
// Create main tree for testing.
db := dbm.NewMemDB()
iStore, err := iavl.LoadStore(db, log.NewNopLogger(), types.NewKVStoreKey("test"), types.CommitID{}, iavl.DefaultIAVLCacheSize, false, metrics.NewNoOpMetrics())
store := iStore.(*iavl.Store)
require.Nil(t, err)
store.Set([]byte("MYKEY"), []byte("MYVALUE"))
cid := store.Commit()
// Get Proof
res, err := store.Query(&types.RequestQuery{
Path: "/key", // required path to get key/value+proof
Data: []byte("MYKEY"),
Prove: true,
})
require.NoError(t, err)
require.NotNil(t, res.ProofOps)
// Verify proof.
prt := DefaultProofRuntime()
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE"))
require.Nil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY_NOT", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY/MYKEY", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "MYKEY", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE_NOT"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte(nil))
require.NotNil(t, err)
}
func TestVerifyMultiStoreQueryProof(t *testing.T) {
// Create main tree for testing.
db := dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
iavlStoreKey := types.NewKVStoreKey("iavlStoreKey")
store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil)
require.NoError(t, store.LoadVersion(0))
iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store)
iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
cid := store.Commit()
// Get Proof
res, err := store.Query(&types.RequestQuery{
Path: "/iavlStoreKey/key", // required path to get key/value+proof
Data: []byte("MYKEY"),
Prove: true,
})
require.NoError(t, err)
require.NotNil(t, res.ProofOps)
// Verify proof.
prt := DefaultProofRuntime()
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE"))
require.Nil(t, err)
// Verify proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE"))
require.Nil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY_NOT", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY/MYKEY", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "iavlStoreKey/MYKEY", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/MYKEY", []byte("MYVALUE"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte("MYVALUE_NOT"))
require.NotNil(t, err)
// Verify (bad) proof.
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYKEY", []byte(nil))
require.NotNil(t, err)
}
func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) {
// Create main tree for testing.
db := dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
iavlStoreKey := types.NewKVStoreKey("iavlStoreKey")
store.MountStoreWithDB(iavlStoreKey, types.StoreTypeIAVL, nil)
err := store.LoadVersion(0)
require.NoError(t, err)
iavlStore := store.GetCommitStore(iavlStoreKey).(*iavl.Store)
iavlStore.Set([]byte("MYKEY"), []byte("MYVALUE"))
cid := store.Commit() // commit so the query below can prove against a saved version
// Get Proof
res, err := store.Query(&types.RequestQuery{
Path: "/iavlStoreKey/key", // required path to get key/value+proof
Data: []byte("MYABSENTKEY"),
Prove: true,
})
require.NoError(t, err)
require.NotNil(t, res.ProofOps)
// Verify proof.
prt := DefaultProofRuntime()
err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY")
require.Nil(t, err)
// Verify (bad) proof.
prt = DefaultProofRuntime()
err = prt.VerifyAbsence(res.ProofOps, cid.Hash, "/MYABSENTKEY")
require.NotNil(t, err)
// Verify (bad) proof.
prt = DefaultProofRuntime()
err = prt.VerifyValue(res.ProofOps, cid.Hash, "/iavlStoreKey/MYABSENTKEY", []byte(""))
require.NotNil(t, err)
}


@ -0,0 +1,321 @@
package rootmulti_test
import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"math/rand"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/iavl"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/rootmulti"
"cosmossdk.io/store/snapshots"
snapshottypes "cosmossdk.io/store/snapshots/types"
"cosmossdk.io/store/types"
)
func newMultiStoreWithGeneratedData(db dbm.DB, stores uint8, storeKeys uint64) *rootmulti.Store {
multiStore := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
r := rand.New(rand.NewSource(49872768940)) // Fixed seed for deterministic tests
keys := []*types.KVStoreKey{}
for i := uint8(0); i < stores; i++ {
key := types.NewKVStoreKey(fmt.Sprintf("store%v", i))
multiStore.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
keys = append(keys, key)
}
err := multiStore.LoadLatestVersion()
if err != nil {
panic(err)
}
for _, key := range keys {
store := multiStore.GetCommitKVStore(key).(*iavl.Store)
for i := uint64(0); i < storeKeys; i++ {
k := make([]byte, 8)
v := make([]byte, 1024)
binary.BigEndian.PutUint64(k, i)
_, err := r.Read(v)
if err != nil {
panic(err)
}
store.Set(k, v)
}
}
multiStore.Commit()
err = multiStore.LoadLatestVersion()
if err != nil {
panic(err)
}
return multiStore
}
func newMultiStoreWithMixedMounts(db dbm.DB) *rootmulti.Store {
store := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("iavl3"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil)
if err := store.LoadLatestVersion(); err != nil {
panic(err)
}
return store
}
func newMultiStoreWithMixedMountsAndBasicData(db dbm.DB) *rootmulti.Store {
store := newMultiStoreWithMixedMounts(db)
store1 := store.GetStoreByName("iavl1").(types.CommitKVStore)
store2 := store.GetStoreByName("iavl2").(types.CommitKVStore)
trans1 := store.GetStoreByName("trans1").(types.KVStore)
store1.Set([]byte("a"), []byte{1})
store1.Set([]byte("b"), []byte{1})
store2.Set([]byte("X"), []byte{255})
store2.Set([]byte("A"), []byte{101})
trans1.Set([]byte("x1"), []byte{91})
store.Commit()
store1.Set([]byte("b"), []byte{2})
store1.Set([]byte("c"), []byte{3})
store2.Set([]byte("B"), []byte{102})
store.Commit()
store2.Set([]byte("C"), []byte{103})
store2.Delete([]byte("X"))
trans1.Set([]byte("x2"), []byte{92})
store.Commit()
return store
}
func assertStoresEqual(t *testing.T, expect, actual types.CommitKVStore, msgAndArgs ...interface{}) {
t.Helper()
assert.Equal(t, expect.LastCommitID(), actual.LastCommitID())
expectIter := expect.Iterator(nil, nil)
expectMap := map[string][]byte{}
for ; expectIter.Valid(); expectIter.Next() {
expectMap[string(expectIter.Key())] = expectIter.Value()
}
require.NoError(t, expectIter.Error())
actualIter := actual.Iterator(nil, nil)
actualMap := map[string][]byte{}
for ; actualIter.Valid(); actualIter.Next() {
actualMap[string(actualIter.Key())] = actualIter.Value()
}
require.NoError(t, actualIter.Error())
assert.Equal(t, expectMap, actualMap, msgAndArgs...)
}
func TestMultistoreSnapshot_Checksum(t *testing.T) {
// Chunks from different nodes must fit together, so all nodes must produce identical chunks.
// This checksum test makes sure that the byte stream remains identical. If the test fails
// without having changed the data (e.g. because the Protobuf or zlib encoding changes),
// snapshottypes.CurrentFormat must be bumped.
store := newMultiStoreWithGeneratedData(dbm.NewMemDB(), 5, 10000)
version := uint64(store.LastCommitID().Version)
testcases := []struct {
format uint32
chunkHashes []string
}{
{1, []string{
"503e5b51b657055b77e88169fadae543619368744ad15f1de0736c0a20482f24",
"e1a0daaa738eeb43e778aefd2805e3dd720798288a410b06da4b8459c4d8f72e",
"aa048b4ee0f484965d7b3b06822cf0772cdcaad02f3b1b9055e69f2cb365ef3c",
"7921eaa3ed4921341e504d9308a9877986a879fe216a099c86e8db66fcba4c63",
"a4a864e6c02c9fca5837ec80dc84f650b25276ed7e4820cf7516ced9f9901b86",
"980925390cc50f14998ecb1e87de719ca9dd7e72f5fefbe445397bf670f36c31",
}},
}
for _, tc := range testcases {
tc := tc
t.Run(fmt.Sprintf("Format %v", tc.format), func(t *testing.T) {
ch := make(chan io.ReadCloser)
go func() {
streamWriter := snapshots.NewStreamWriter(ch)
defer streamWriter.Close()
require.NotNil(t, streamWriter)
err := store.Snapshot(version, streamWriter)
require.NoError(t, err)
}()
hashes := []string{}
hasher := sha256.New()
for chunk := range ch {
hasher.Reset()
_, err := io.Copy(hasher, chunk)
require.NoError(t, err)
hashes = append(hashes, hex.EncodeToString(hasher.Sum(nil)))
}
assert.Equal(t, tc.chunkHashes, hashes,
"Snapshot output for format %v has changed", tc.format)
})
}
}
func TestMultistoreSnapshot_Errors(t *testing.T) {
store := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
testcases := map[string]struct {
height uint64
expectType error
}{
"0 height": {0, nil},
"unknown height": {9, nil},
}
for name, tc := range testcases {
tc := tc
t.Run(name, func(t *testing.T) {
err := store.Snapshot(tc.height, nil)
require.Error(t, err)
if tc.expectType != nil {
assert.True(t, errors.Is(err, tc.expectType))
}
})
}
}
func TestMultistoreSnapshotRestore(t *testing.T) {
source := newMultiStoreWithMixedMountsAndBasicData(dbm.NewMemDB())
target := newMultiStoreWithMixedMounts(dbm.NewMemDB())
version := uint64(source.LastCommitID().Version)
require.EqualValues(t, 3, version)
dummyExtensionItem := snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_Extension{
Extension: &snapshottypes.SnapshotExtensionMeta{
Name: "test",
Format: 1,
},
},
}
chunks := make(chan io.ReadCloser, 100)
go func() {
streamWriter := snapshots.NewStreamWriter(chunks)
require.NotNil(t, streamWriter)
defer streamWriter.Close()
err := source.Snapshot(version, streamWriter)
require.NoError(t, err)
// write an extension metadata
err = streamWriter.WriteMsg(&dummyExtensionItem)
require.NoError(t, err)
}()
streamReader, err := snapshots.NewStreamReader(chunks)
require.NoError(t, err)
nextItem, err := target.Restore(version, snapshottypes.CurrentFormat, streamReader)
require.NoError(t, err)
require.Equal(t, *dummyExtensionItem.GetExtension(), *nextItem.GetExtension())
assert.Equal(t, source.LastCommitID(), target.LastCommitID())
for _, key := range source.StoreKeysByName() {
sourceStore := source.GetStoreByName(key.Name()).(types.CommitKVStore)
targetStore := target.GetStoreByName(key.Name()).(types.CommitKVStore)
switch sourceStore.GetStoreType() {
case types.StoreTypeTransient:
assert.False(t, targetStore.Iterator(nil, nil).Valid(),
"transient store %v not empty", key.Name())
default:
assertStoresEqual(t, sourceStore, targetStore, "store %q not equal", key.Name())
}
}
}
func benchmarkMultistoreSnapshot(b *testing.B, stores uint8, storeKeys uint64) {
b.Helper()
b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
b.ReportAllocs()
b.StopTimer()
source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys)
version := source.LastCommitID().Version
require.EqualValues(b, 1, version)
b.StartTimer()
for i := 0; i < b.N; i++ {
target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())
for _, key := range source.StoreKeysByName() {
target.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
}
err := target.LoadLatestVersion()
require.NoError(b, err)
require.EqualValues(b, 0, target.LastCommitID().Version)
chunks := make(chan io.ReadCloser)
go func() {
streamWriter := snapshots.NewStreamWriter(chunks)
require.NotNil(b, streamWriter)
err := source.Snapshot(uint64(version), streamWriter)
require.NoError(b, err)
}()
for reader := range chunks {
_, err := io.Copy(io.Discard, reader)
require.NoError(b, err)
err = reader.Close()
require.NoError(b, err)
}
}
}
func benchmarkMultistoreSnapshotRestore(b *testing.B, stores uint8, storeKeys uint64) {
b.Helper()
b.Skip("Noisy with slow setup time, please see https://github.com/cosmos/cosmos-sdk/issues/8855.")
b.ReportAllocs()
b.StopTimer()
source := newMultiStoreWithGeneratedData(dbm.NewMemDB(), stores, storeKeys)
version := uint64(source.LastCommitID().Version)
require.EqualValues(b, 1, version)
b.StartTimer()
for i := 0; i < b.N; i++ {
target := rootmulti.NewStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())
for _, key := range source.StoreKeysByName() {
target.MountStoreWithDB(key, types.StoreTypeIAVL, nil)
}
err := target.LoadLatestVersion()
require.NoError(b, err)
require.EqualValues(b, 0, target.LastCommitID().Version)
chunks := make(chan io.ReadCloser)
go func() {
writer := snapshots.NewStreamWriter(chunks)
require.NotNil(b, writer)
err := source.Snapshot(version, writer)
require.NoError(b, err)
}()
reader, err := snapshots.NewStreamReader(chunks)
require.NoError(b, err)
_, err = target.Restore(version, snapshottypes.CurrentFormat, reader)
require.NoError(b, err)
require.Equal(b, source.LastCommitID(), target.LastCommitID())
}
}
func BenchmarkMultistoreSnapshot100K(b *testing.B) {
benchmarkMultistoreSnapshot(b, 10, 10000)
}
func BenchmarkMultistoreSnapshot1M(b *testing.B) {
benchmarkMultistoreSnapshot(b, 10, 100000)
}
func BenchmarkMultistoreSnapshotRestore100K(b *testing.B) {
benchmarkMultistoreSnapshotRestore(b, 10, 10000)
}
func BenchmarkMultistoreSnapshotRestore1M(b *testing.B) {
benchmarkMultistoreSnapshotRestore(b, 10, 100000)
}

1244
store/rootmulti/store.go Normal file

File diff suppressed because it is too large


@ -0,0 +1,992 @@
package rootmulti
import (
"bytes"
"crypto/sha256"
"fmt"
"testing"
"time"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/cachemulti"
"cosmossdk.io/store/iavl"
sdkmaps "cosmossdk.io/store/internal/maps"
"cosmossdk.io/store/metrics"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
func TestStoreType(t *testing.T) {
db := dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, db)
}
func TestGetCommitKVStore(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningDefault))
err := ms.LoadLatestVersion()
require.Nil(t, err)
key := ms.keysByName["store1"]
store1 := ms.GetCommitKVStore(key)
require.NotNil(t, store1)
require.IsType(t, &iavl.Store{}, store1)
store2 := ms.GetCommitStore(key)
require.NotNil(t, store2)
require.IsType(t, &iavl.Store{}, store2)
}
func TestStoreMount(t *testing.T) {
db := dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
key1 := types.NewKVStoreKey("store1")
key2 := types.NewKVStoreKey("store2")
dup1 := types.NewKVStoreKey("store1")
require.NotPanics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) })
require.NotPanics(t, func() { store.MountStoreWithDB(key2, types.StoreTypeIAVL, db) })
require.Panics(t, func() { store.MountStoreWithDB(key1, types.StoreTypeIAVL, db) })
require.Panics(t, func() { store.MountStoreWithDB(nil, types.StoreTypeIAVL, db) })
require.Panics(t, func() { store.MountStoreWithDB(dup1, types.StoreTypeIAVL, db) })
}
func TestCacheMultiStore(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
cacheMulti := ms.CacheMultiStore()
require.IsType(t, cachemulti.Store{}, cacheMulti)
}
func TestCacheMultiStoreWithVersion(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := ms.LoadLatestVersion()
require.Nil(t, err)
emptyHash := sha256.Sum256([]byte{})
appHash := emptyHash[:]
commitID := types.CommitID{Hash: appHash}
checkStore(t, ms, commitID, commitID)
k, v := []byte("wind"), []byte("blows")
store1 := ms.GetStoreByName("store1").(types.KVStore)
store1.Set(k, v)
cID := ms.Commit()
require.Equal(t, int64(1), cID.Version)
// requesting an invalid or pruned version must fail
_, err = ms.CacheMultiStoreWithVersion(cID.Version + 1)
require.Error(t, err)
// require a valid version can be cache-loaded
cms, err := ms.CacheMultiStoreWithVersion(cID.Version)
require.NoError(t, err)
// require a valid key lookup yields the correct value
kvStore := cms.GetKVStore(ms.keysByName["store1"])
require.NotNil(t, kvStore)
require.Equal(t, kvStore.Get(k), v)
// add new module stores (store4 and store5) to multi stores and commit
ms.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil)
ms.MountStoreWithDB(types.NewKVStoreKey("store5"), types.StoreTypeIAVL, nil)
err = ms.LoadLatestVersionAndUpgrade(&types.StoreUpgrades{Added: []string{"store4", "store5"}})
require.NoError(t, err)
ms.Commit()
// cache multistore of version before adding store4 should work
_, err = ms.CacheMultiStoreWithVersion(1)
require.NoError(t, err)
// require we cannot commit (write) to a cache-versioned multi-store
require.Panics(t, func() {
kvStore.Set(k, []byte("newValue"))
cms.Write()
})
}
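The test above shows the core pattern for reading historical state: pin a read-only cache multistore at an older version. The sketch below distills it, reusing helpers defined further down in this file (newMultiStoreWithMounts, keysByName); the test name is hypothetical and the flow is illustrative only.
func TestHistoricalReadSketch(t *testing.T) {
ms := newMultiStoreWithMounts(dbm.NewMemDB(), pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
require.NoError(t, ms.LoadLatestVersion())
store := ms.GetStoreByName("store1").(types.KVStore)
// Version 1 holds "v1".
store.Set([]byte("k"), []byte("v1"))
v1ID := ms.Commit()
// Version 2 overwrites it with "v2".
store.Set([]byte("k"), []byte("v2"))
ms.Commit()
// Pin a read-only view at version 1; writes to it cannot be committed.
cms, err := ms.CacheMultiStoreWithVersion(v1ID.Version)
require.NoError(t, err)
require.Equal(t, []byte("v1"), cms.GetKVStore(ms.keysByName["store1"]).Get([]byte("k")))
}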
func TestHashStableWithEmptyCommit(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := ms.LoadLatestVersion()
require.Nil(t, err)
emptyHash := sha256.Sum256([]byte{})
appHash := emptyHash[:]
commitID := types.CommitID{Hash: appHash}
checkStore(t, ms, commitID, commitID)
k, v := []byte("wind"), []byte("blows")
store1 := ms.GetStoreByName("store1").(types.KVStore)
store1.Set(k, v)
workingHash := ms.WorkingHash()
cID := ms.Commit()
require.Equal(t, int64(1), cID.Version)
hash := cID.Hash
require.Equal(t, workingHash, hash)
// make an empty commit, it should update version, but not affect hash
workingHash = ms.WorkingHash()
cID = ms.Commit()
require.Equal(t, workingHash, cID.Hash)
require.Equal(t, int64(2), cID.Version)
require.Equal(t, hash, cID.Hash)
}
func TestMultistoreCommitLoad(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := store.LoadLatestVersion()
require.Nil(t, err)
emptyHash := sha256.Sum256([]byte{})
appHash := emptyHash[:]
// New store has empty last commit.
commitID := types.CommitID{Hash: appHash}
checkStore(t, store, commitID, commitID)
// Make sure we can get stores by name.
s1 := store.GetStoreByName("store1")
require.NotNil(t, s1)
s3 := store.GetStoreByName("store3")
require.NotNil(t, s3)
s77 := store.GetStoreByName("store77")
require.Nil(t, s77)
// Make a few commits and check them.
nCommits := int64(3)
for i := int64(0); i < nCommits; i++ {
workingHash := store.WorkingHash()
commitID = store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, i+1)
checkStore(t, store, expectedCommitID, commitID)
}
// Load the latest multistore again and check version.
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadLatestVersion()
require.Nil(t, err)
commitID = getExpectedCommitID(store, nCommits)
checkStore(t, store, commitID, commitID)
// Commit and check version.
workingHash := store.WorkingHash()
commitID = store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, nCommits+1)
checkStore(t, store, expectedCommitID, commitID)
// Load an older multistore and check version.
ver := nCommits - 1
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadVersion(ver)
require.Nil(t, err)
commitID = getExpectedCommitID(store, ver)
checkStore(t, store, commitID, commitID)
}
func TestMultistoreLoadWithUpgrade(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := store.LoadLatestVersion()
require.Nil(t, err)
// write some data in all stores
k1, v1 := []byte("first"), []byte("store")
s1, _ := store.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, s1)
s1.Set(k1, v1)
k2, v2 := []byte("second"), []byte("restore")
s2, _ := store.GetStoreByName("store2").(types.KVStore)
require.NotNil(t, s2)
s2.Set(k2, v2)
k3, v3 := []byte("third"), []byte("dropped")
s3, _ := store.GetStoreByName("store3").(types.KVStore)
require.NotNil(t, s3)
s3.Set(k3, v3)
s4, _ := store.GetStoreByName("store4").(types.KVStore)
require.Nil(t, s4)
// do one commit
workingHash := store.WorkingHash()
commitID := store.Commit()
require.Equal(t, workingHash, commitID.Hash)
expectedCommitID := getExpectedCommitID(store, 1)
checkStore(t, store, expectedCommitID, commitID)
ci, err := store.GetCommitInfo(1)
require.NoError(t, err)
require.Equal(t, int64(1), ci.Version)
require.Equal(t, 3, len(ci.StoreInfos))
checkContains(t, ci.StoreInfos, []string{"store1", "store2", "store3"})
// Load without changes and make sure it is sensible
store = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = store.LoadLatestVersion()
require.Nil(t, err)
commitID = getExpectedCommitID(store, 1)
checkStore(t, store, commitID, commitID)
// let's query data to see it was saved properly
s2, _ = store.GetStoreByName("store2").(types.KVStore)
require.NotNil(t, s2)
require.Equal(t, v2, s2.Get(k2))
// now, let's load with upgrades...
restore, upgrades := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = restore.LoadLatestVersionAndUpgrade(upgrades)
require.Nil(t, err)
// s1 was not changed
s1, _ = restore.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, s1)
require.Equal(t, v1, s1.Get(k1))
// store3 is mounted, but its deleted data is gone
s3, _ = restore.GetStoreByName("store3").(types.KVStore)
require.NotNil(t, s3)
require.Nil(t, s3.Get(k3)) // data was deleted
// store4 is mounted, with empty data
s4, _ = restore.GetStoreByName("store4").(types.KVStore)
require.NotNil(t, s4)
iterator := s4.Iterator(nil, nil)
values := 0
for ; iterator.Valid(); iterator.Next() {
values++
}
require.Zero(t, values)
require.NoError(t, iterator.Close())
// write something inside store4
k4, v4 := []byte("fourth"), []byte("created")
s4.Set(k4, v4)
// store2 is no longer mounted
st2 := restore.GetStoreByName("store2")
require.Nil(t, st2)
// restore2 has the old data
rs2, _ := restore.GetStoreByName("restore2").(types.KVStore)
require.NotNil(t, rs2)
require.Equal(t, v2, rs2.Get(k2))
// store this migrated data, and load it again without migrations
migratedID := restore.Commit()
require.Equal(t, migratedID.Version, int64(2))
reload, _ := newMultiStoreWithModifiedMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
// unmount store3 since store3 was deleted
unmountStore(reload, "store3")
rs3, _ := reload.GetStoreByName("store3").(types.KVStore)
require.Nil(t, rs3)
err = reload.LoadLatestVersion()
require.Nil(t, err)
require.Equal(t, migratedID, reload.LastCommitID())
// query this new store
rl1, _ := reload.GetStoreByName("store1").(types.KVStore)
require.NotNil(t, rl1)
require.Equal(t, v1, rl1.Get(k1))
rl2, _ := reload.GetStoreByName("restore2").(types.KVStore)
require.NotNil(t, rl2)
require.Equal(t, v2, rl2.Get(k2))
rl4, _ := reload.GetStoreByName("store4").(types.KVStore)
require.NotNil(t, rl4)
require.Equal(t, v4, rl4.Get(k4))
// check commitInfo in storage
ci, err = reload.GetCommitInfo(2)
require.NoError(t, err)
require.Equal(t, int64(2), ci.Version)
require.Equal(t, 3, len(ci.StoreInfos), ci.StoreInfos)
checkContains(t, ci.StoreInfos, []string{"store1", "restore2", "store4"})
}
func TestParsePath(t *testing.T) {
_, _, err := parsePath("foo")
require.Error(t, err)
store, subpath, err := parsePath("/foo")
require.NoError(t, err)
require.Equal(t, store, "foo")
require.Equal(t, subpath, "")
store, subpath, err = parsePath("/fizz/bang/baz")
require.NoError(t, err)
require.Equal(t, store, "fizz")
require.Equal(t, subpath, "/bang/baz")
substore, subsubpath, err := parsePath(subpath)
require.NoError(t, err)
require.Equal(t, substore, "bang")
require.Equal(t, subsubpath, "/baz")
}
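The contract TestParsePath checks is simple: strip the leading slash, then split at the next one. A minimal sketch of that rule, written against only what the test asserts (the actual parsePath lives in store.go and may differ in details):
func splitStorePath(path string) (storeName, subpath string, err error) {
if len(path) == 0 || path[0] != '/' {
return "", "", fmt.Errorf("invalid path: %s", path)
}
rest := path[1:]
for i := 0; i < len(rest); i++ {
if rest[i] == '/' {
// "/fizz/bang/baz" -> ("fizz", "/bang/baz")
return rest[:i], rest[i:], nil
}
}
// "/foo" -> ("foo", "")
return rest, "", nil
}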
func TestMultiStoreRestart(t *testing.T) {
db := dbm.NewMemDB()
pruning := pruningtypes.NewCustomPruningOptions(2, 1)
multi := newMultiStoreWithMounts(db, pruning)
err := multi.LoadLatestVersion()
require.Nil(t, err)
initCid := multi.LastCommitID()
k, v := "wind", "blows"
k2, v2 := "water", "flows"
k3, v3 := "fire", "burns"
for i := 1; i < 3; i++ {
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, i)))
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, i)))
// ... and another.
store3 := multi.GetStoreByName("store3").(types.KVStore)
store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, i)))
multi.Commit()
cinfo, err := multi.GetCommitInfo(int64(i))
require.NoError(t, err)
require.Equal(t, int64(i), cinfo.Version)
}
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set([]byte(k), []byte(fmt.Sprintf("%s:%d", v, 3)))
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set([]byte(k2), []byte(fmt.Sprintf("%s:%d", v2, 3)))
multi.Commit()
flushedCinfo, err := multi.GetCommitInfo(3)
require.Nil(t, err)
require.NotEqual(t, initCid, flushedCinfo, "CID is different after flush to disk")
// ... and another.
store3 := multi.GetStoreByName("store3").(types.KVStore)
store3.Set([]byte(k3), []byte(fmt.Sprintf("%s:%d", v3, 3)))
multi.Commit()
postFlushCinfo, err := multi.GetCommitInfo(4)
require.NoError(t, err)
require.Equal(t, int64(4), postFlushCinfo.Version, "Commit changed after in-memory commit")
multi = newMultiStoreWithMounts(db, pruning)
err = multi.LoadLatestVersion()
require.Nil(t, err)
reloadedCid := multi.LastCommitID()
require.Equal(t, int64(4), reloadedCid.Version, "Reloaded CID is not the same as last flushed CID")
// Check that store1 and store2 retained data from the 3rd commit
store1 = multi.GetStoreByName("store1").(types.KVStore)
val := store1.Get([]byte(k))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v, 3)), val, "Reloaded value not the same as last flushed value")
store2 = multi.GetStoreByName("store2").(types.KVStore)
val2 := store2.Get([]byte(k2))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v2, 3)), val2, "Reloaded value not the same as last flushed value")
// Check that store3 still has the data from its last update, which landed in the 4th commit
store3 = multi.GetStoreByName("store3").(types.KVStore)
val3 := store3.Get([]byte(k3))
require.Equal(t, []byte(fmt.Sprintf("%s:%d", v3, 3)), val3, "Reloaded value not the same as last flushed value")
}
func TestMultiStoreQuery(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := multi.LoadLatestVersion()
require.Nil(t, err)
k, v := []byte("wind"), []byte("blows")
k2, v2 := []byte("water"), []byte("flows")
// v3 := []byte("is cold")
// Commit the multistore.
_ = multi.Commit()
// Make sure we can get by name.
garbage := multi.GetStoreByName("bad-name")
require.Nil(t, garbage)
// Set and commit data in one store.
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set(k, v)
// ... and another.
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set(k2, v2)
// Commit the multistore.
cid := multi.Commit()
ver := cid.Version
// Reload multistore from database
multi = newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err = multi.LoadLatestVersion()
require.Nil(t, err)
// Test bad path.
query := types.RequestQuery{Path: "/key", Data: k, Height: ver}
_, err = multi.Query(&query)
codespace, code, _ := errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
query.Path = "h897fy32890rf63296r92"
_, err = multi.Query(&query)
codespace, code, _ = errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
// Test invalid store name.
query.Path = "/garbage/key"
_, err = multi.Query(&query)
codespace, code, _ = errors.ABCIInfo(err, false)
require.EqualValues(t, types.ErrUnknownRequest.ABCICode(), code)
require.EqualValues(t, types.ErrUnknownRequest.Codespace(), codespace)
// Test valid query with data.
query.Path = "/store1/key"
qres, err := multi.Query(&query)
require.NoError(t, err)
require.Equal(t, v, qres.Value)
// Test valid but empty query.
query.Path = "/store2/key"
query.Prove = true
qres, err = multi.Query(&query)
require.NoError(t, err)
require.Nil(t, qres.Value)
// Test store2 data.
// Since we are using the request as a reference, the path will be modified.
query.Data = k2
query.Path = "/store2/key"
qres, err = multi.Query(&query)
require.NoError(t, err)
require.Equal(t, v2, qres.Value)
}
func TestMultiStore_Pruning(t *testing.T) {
testCases := []struct {
name string
numVersions int64
po pruningtypes.PruningOptions
deleted []int64
saved []int64
}{
{"prune nothing", 10, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
{"prune everything", 12, pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []int64{1, 2, 3, 4, 5, 6, 7}, []int64{8, 9, 10, 11, 12}},
{"prune some; no batch", 10, pruningtypes.NewCustomPruningOptions(2, 1), []int64{1, 2, 3, 4, 6, 5, 7}, []int64{8, 9, 10}},
{"prune some; small batch", 10, pruningtypes.NewCustomPruningOptions(2, 3), []int64{1, 2, 3, 4, 5, 6}, []int64{7, 8, 9, 10}},
{"prune some; large batch", 10, pruningtypes.NewCustomPruningOptions(2, 11), nil, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
db := dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, tc.po)
require.NoError(t, ms.LoadLatestVersion())
for i := int64(0); i < tc.numVersions; i++ {
ms.Commit()
}
for _, v := range tc.saved {
_, err := ms.CacheMultiStoreWithVersion(v)
require.NoError(t, err, "expected no error when loading height: %d", v)
}
for _, v := range tc.deleted {
_, err := ms.CacheMultiStoreWithVersion(v)
require.Error(t, err, "expected error when loading height: %d", v)
}
})
}
}
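The expectations in these cases are consistent with one rule: whenever the committed height h is a multiple of the interval, every version at or below h - keepRecent - 1 becomes unavailable. A hedged helper reproducing the deleted sets above (an illustration of the test data, not the pruning manager's actual algorithm):
func expectedPruned(numVersions int64, keepRecent, interval uint64) []int64 {
pruneHeight := int64(0)
for h := int64(1); h <= numVersions; h++ {
if interval > 0 && uint64(h)%interval == 0 {
if ph := h - int64(keepRecent) - 1; ph > pruneHeight {
pruneHeight = ph
}
}
}
deleted := make([]int64, 0, pruneHeight)
for v := int64(1); v <= pruneHeight; v++ {
deleted = append(deleted, v)
}
// e.g. expectedPruned(10, 2, 3) -> [1 2 3 4 5 6], matching "prune some; small batch"
return deleted
}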
func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) {
const (
numVersions int64 = 10
keepRecent uint64 = 2
interval uint64 = 10
)
db := dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(keepRecent, interval))
require.NoError(t, ms.LoadLatestVersion())
var lastCommitInfo types.CommitID
for i := int64(0); i < numVersions; i++ {
lastCommitInfo = ms.Commit()
}
require.Equal(t, numVersions, lastCommitInfo.Version)
for v := int64(1); v < numVersions-int64(keepRecent); v++ {
err := ms.LoadVersion(v)
require.Error(t, err, "expected error when loading pruned height: %d", v)
}
for v := (numVersions - int64(keepRecent)); v < numVersions; v++ {
err := ms.LoadVersion(v)
require.NoError(t, err, "expected no error when loading height: %d", v)
}
// Get latest
err := ms.LoadVersion(numVersions - 1)
require.NoError(t, err)
// Ensure already pruned snapshot heights were loaded
require.NoError(t, ms.pruningManager.LoadSnapshotHeights(db))
// Test pruning the same heights again
lastCommitInfo = ms.Commit()
require.Equal(t, numVersions, lastCommitInfo.Version)
// Ensure we can commit one more height without panicking
lastCommitInfo = ms.Commit()
require.Equal(t, numVersions+1, lastCommitInfo.Version)
}
func TestMultiStore_PruningRestart(t *testing.T) {
db := dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
require.NoError(t, ms.LoadLatestVersion())
// Commit enough to build up heights to prune, where on the next block we should
// batch delete.
for i := int64(0); i < 10; i++ {
ms.Commit()
}
actualHeightToPrune := ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(0), actualHeightToPrune)
// "restart"
ms = newMultiStoreWithMounts(db, pruningtypes.NewCustomPruningOptions(2, 11))
err := ms.LoadLatestVersion()
require.NoError(t, err)
actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(0), actualHeightToPrune)
// commit one more block and ensure the heights have been pruned
ms.Commit()
actualHeightToPrune = ms.pruningManager.GetPruningHeight(ms.LatestVersion())
require.Equal(t, int64(8), actualHeightToPrune)
for v := int64(1); v <= actualHeightToPrune; v++ {
_, err := ms.CacheMultiStoreWithVersion(v)
require.Error(t, err, "expected error when loading height: %d", v)
}
}
// TestUnevenStoresHeightCheck tests that loading the root store correctly errors
// when any module store is at the wrong height
func TestUnevenStoresHeightCheck(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
store := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := store.LoadLatestVersion()
require.Nil(t, err)
// commit to increment store's height
store.Commit()
// mount store4 to root store
store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil)
// load the stores without upgrades
err = store.LoadLatestVersion()
require.Error(t, err)
// now, let's load with upgrades...
upgrades := &types.StoreUpgrades{
Added: []string{"store4"},
}
err = store.LoadLatestVersionAndUpgrade(upgrades)
require.Nil(t, err)
}
func TestSetInitialVersion(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
require.NoError(t, multi.LoadLatestVersion())
err := multi.SetInitialVersion(5)
require.NoError(t, err)
require.Equal(t, int64(5), multi.initialVersion)
multi.Commit()
require.Equal(t, int64(5), multi.LastCommitID().Version)
ckvs := multi.GetCommitKVStore(multi.keysByName["store1"])
iavlStore, ok := ckvs.(*iavl.Store)
require.True(t, ok)
require.True(t, iavlStore.VersionExists(5))
}
func TestAddListenersAndListeningEnabled(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
testKey := types.NewKVStoreKey("listening_test_key")
enabled := multi.ListeningEnabled(testKey)
require.False(t, enabled)
wrongTestKey := types.NewKVStoreKey("wrong_listening_test_key")
multi.AddListeners([]types.StoreKey{testKey})
enabled = multi.ListeningEnabled(wrongTestKey)
require.False(t, enabled)
enabled = multi.ListeningEnabled(testKey)
require.True(t, enabled)
}
func TestCacheWraps(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
cacheWrapper := multi.CacheWrap()
require.IsType(t, cachemulti.Store{}, cacheWrapper)
cacheWrappedWithTrace := multi.CacheWrapWithTrace(nil, nil)
require.IsType(t, cachemulti.Store{}, cacheWrappedWithTrace)
}
func TestTraceConcurrency(t *testing.T) {
db := dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := multi.LoadLatestVersion()
require.NoError(t, err)
b := &bytes.Buffer{}
key := multi.keysByName["store1"]
tc := types.TraceContext(map[string]interface{}{"blockHeight": 64})
multi.SetTracer(b)
multi.SetTracingContext(tc)
cms := multi.CacheMultiStore()
store1 := cms.GetKVStore(key)
cw := store1.CacheWrapWithTrace(b, tc)
_ = cw
require.NotNil(t, store1)
stop := make(chan struct{})
stopW := make(chan struct{})
go func(stop chan struct{}) {
for {
select {
case <-stop:
return
default:
store1.Set([]byte{1}, []byte{1})
cms.Write()
}
}
}(stop)
go func(stop chan struct{}) {
for {
select {
case <-stop:
return
default:
multi.SetTracingContext(tc)
}
}
}(stopW)
time.Sleep(3 * time.Second)
stop <- struct{}{}
stopW <- struct{}{}
}
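TestTraceConcurrency only checks that tracing and writing do not race; for the basic behavior being raced on, here is a short hedged sketch (hypothetical test name, same APIs): once a tracer and context are set, operations on a KVStore obtained through the multistore are logged to the writer.
func TestTracingSketch(t *testing.T) {
multi := newMultiStoreWithMounts(dbm.NewMemDB(), pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
require.NoError(t, multi.LoadLatestVersion())
buf := &bytes.Buffer{}
multi.SetTracer(buf)
multi.SetTracingContext(types.TraceContext(map[string]interface{}{"blockHeight": 64}))
store := multi.CacheMultiStore().GetKVStore(multi.keysByName["store1"])
store.Set([]byte("k"), []byte("v"))
// The exact wire format is an implementation detail; assert only that
// the write was traced at all.
require.NotZero(t, buf.Len())
}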
func TestCommitOrdered(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
multi := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
err := multi.LoadLatestVersion()
require.Nil(t, err)
emptyHash := sha256.Sum256([]byte{})
appHash := emptyHash[:]
commitID := types.CommitID{Hash: appHash}
checkStore(t, multi, commitID, commitID)
k, v := []byte("wind"), []byte("blows")
k2, v2 := []byte("water"), []byte("flows")
k3, v3 := []byte("fire"), []byte("burns")
store1 := multi.GetStoreByName("store1").(types.KVStore)
store1.Set(k, v)
store2 := multi.GetStoreByName("store2").(types.KVStore)
store2.Set(k2, v2)
store3 := multi.GetStoreByName("store3").(types.KVStore)
store3.Set(k3, v3)
typeID := multi.Commit()
require.Equal(t, int64(1), typeID.Version)
ci, err := multi.GetCommitInfo(1)
require.NoError(t, err)
require.Equal(t, int64(1), ci.Version)
require.Equal(t, 3, len(ci.StoreInfos))
for i, s := range ci.StoreInfos {
require.Equal(t, s.Name, fmt.Sprintf("store%d", i+1))
}
}
//-----------------------------------------------------------------------
// utils
var (
testStoreKey1 = types.NewKVStoreKey("store1")
testStoreKey2 = types.NewKVStoreKey("store2")
testStoreKey3 = types.NewKVStoreKey("store3")
)
func newMultiStoreWithMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) *Store {
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.SetPruning(pruningOpts)
store.MountStoreWithDB(testStoreKey1, types.StoreTypeIAVL, nil)
store.MountStoreWithDB(testStoreKey2, types.StoreTypeIAVL, nil)
store.MountStoreWithDB(testStoreKey3, types.StoreTypeIAVL, nil)
return store
}
func newMultiStoreWithModifiedMounts(db dbm.DB, pruningOpts pruningtypes.PruningOptions) (*Store, *types.StoreUpgrades) {
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.SetPruning(pruningOpts)
store.MountStoreWithDB(types.NewKVStoreKey("store1"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("restore2"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("store3"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("store4"), types.StoreTypeIAVL, nil)
upgrades := &types.StoreUpgrades{
Added: []string{"store4"},
Renamed: []types.StoreRename{{
OldKey: "store2",
NewKey: "restore2",
}},
Deleted: []string{"store3"},
}
return store, upgrades
}
func unmountStore(rootStore *Store, storeKeyName string) {
sk := rootStore.keysByName[storeKeyName]
delete(rootStore.stores, sk)
delete(rootStore.storesParams, sk)
delete(rootStore.keysByName, storeKeyName)
}
func checkStore(t *testing.T, store *Store, expect, got types.CommitID) {
t.Helper()
require.Equal(t, expect, got)
require.Equal(t, expect, store.LastCommitID())
}
func checkContains(tb testing.TB, info []types.StoreInfo, wanted []string) {
tb.Helper()
for _, want := range wanted {
checkHas(tb, info, want)
}
}
func checkHas(tb testing.TB, info []types.StoreInfo, want string) {
tb.Helper()
for _, i := range info {
if i.Name == want {
return
}
}
tb.Fatalf("storeInfo doesn't contain %s", want)
}
func getExpectedCommitID(store *Store, ver int64) types.CommitID {
return types.CommitID{
Version: ver,
Hash: hashStores(store.stores),
}
}
func hashStores(stores map[types.StoreKey]types.CommitKVStore) []byte {
m := make(map[string][]byte, len(stores))
for key, store := range stores {
name := key.Name()
m[name] = types.StoreInfo{
Name: name,
CommitId: store.LastCommitID(),
}.GetHash()
}
return sdkmaps.HashFromMap(m)
}
type MockListener struct {
stateCache []types.StoreKVPair
}
func (tl *MockListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error {
tl.stateCache = append(tl.stateCache, types.StoreKVPair{
StoreKey: storeKey.Name(),
Key: key,
Value: value,
Delete: delete,
})
return nil
}
func TestStateListeners(t *testing.T) {
var db dbm.DB = dbm.NewMemDB()
ms := newMultiStoreWithMounts(db, pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))
require.Empty(t, ms.listeners)
ms.AddListeners([]types.StoreKey{testStoreKey1})
require.Equal(t, 1, len(ms.listeners))
require.NoError(t, ms.LoadLatestVersion())
cacheMulti := ms.CacheMultiStore()
store := cacheMulti.GetKVStore(testStoreKey1)
store.Set([]byte{1}, []byte{1})
require.Empty(t, ms.PopStateCache())
// writes are observed when the cache store commits.
cacheMulti.Write()
require.Equal(t, 1, len(ms.PopStateCache()))
// test no listening on unobserved store
store = cacheMulti.GetKVStore(testStoreKey2)
store.Set([]byte{1}, []byte{1})
require.Empty(t, ms.PopStateCache())
// writes are not observed when the cache store commits
cacheMulti.Write()
require.Empty(t, ms.PopStateCache())
}
type commitKVStoreStub struct {
types.CommitKVStore
Committed int
}
func (stub *commitKVStoreStub) Commit() types.CommitID {
commitID := stub.CommitKVStore.Commit()
stub.Committed++
return commitID
}
func prepareStoreMap() (map[types.StoreKey]types.CommitKVStore, error) {
var db dbm.DB = dbm.NewMemDB()
store := NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics())
store.MountStoreWithDB(types.NewKVStoreKey("iavl1"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewKVStoreKey("iavl2"), types.StoreTypeIAVL, nil)
store.MountStoreWithDB(types.NewTransientStoreKey("trans1"), types.StoreTypeTransient, nil)
if err := store.LoadLatestVersion(); err != nil {
return nil, err
}
return map[types.StoreKey]types.CommitKVStore{
testStoreKey1: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("iavl1").(types.CommitKVStore),
},
testStoreKey2: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("iavl2").(types.CommitKVStore),
},
testStoreKey3: &commitKVStoreStub{
CommitKVStore: store.GetStoreByName("trans1").(types.CommitKVStore),
},
}, nil
}
func TestCommitStores(t *testing.T) {
testCases := []struct {
name string
committed int
expectCommit int
}{
{
"when upgrade not get interrupted",
0,
1,
},
{
"when upgrade get interrupted once",
1,
0,
},
{
"when upgrade get interrupted twice",
2,
0,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
storeMap, err := prepareStoreMap()
require.NoError(t, err)
store := storeMap[testStoreKey1].(*commitKVStoreStub)
for i := tc.committed; i > 0; i-- {
store.Commit()
}
store.Committed = 0
var version int64 = 1
removalMap := map[types.StoreKey]bool{}
res := commitStores(version, storeMap, removalMap)
for _, s := range res.StoreInfos {
require.Equal(t, version, s.CommitId.Version)
}
require.Equal(t, version, res.Version)
require.Equal(t, tc.expectCommit, store.Committed)
})
}
}

View File

@ -11,8 +11,8 @@ This document describes the Cosmos SDK implementation of the ABCI state sync
interface, for more information on CometBFT state sync in general see:
* [CometBFT State Sync for Developers](https://medium.com/cometbft/cometbft-core-state-sync-for-developers-70a96ba3ee35)
* [ABCI State Sync](https://docs.cometbft.com/v1.0/explanation/core/state-sync)
* [ABCI State Sync Methods](https://docs.cometbft.com/v1.0/spec/abci/abci++_basic_concepts#state-sync-methods)
* [ABCI State Sync Spec](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
* [ABCI State Sync Method/Type Reference](https://docs.cometbft.com/v0.37/spec/p2p/messages/state-sync)
## Overview
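At a high level, an application wires a snapshot store and manager around its multistore. A hedged sketch using the v1 constructor signatures restored in this commit (`newSnapshotManager`, `dir`, and the in-memory metadata DB are illustrative; real apps configure this through baseapp):

```go
package main

import (
	db "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"
	"cosmossdk.io/store/snapshots"
	snapshottypes "cosmossdk.io/store/snapshots/types"
)

// newSnapshotManager is a hypothetical helper; multistore is anything
// implementing the Snapshotter interface (in practice, the rootmulti store).
func newSnapshotManager(multistore snapshottypes.Snapshotter, dir string) (*snapshots.Manager, error) {
	store, err := snapshots.NewStore(db.NewMemDB(), dir) // metadata DB + chunk directory
	if err != nil {
		return nil, err
	}
	// Snapshot every 1000 blocks and retain the two most recent snapshots.
	opts := snapshottypes.NewSnapshotOptions(1000, 2)
	return snapshots.NewManager(store, opts, multistore, nil, log.NewNopLogger()), nil
}
```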

View File

@ -5,8 +5,8 @@ import (
"math"
"cosmossdk.io/errors"
storeerrors "cosmossdk.io/store/v2/errors"
snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
snapshottypes "cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
)
// ChunkWriter reads an input stream, splits it into fixed-size chunks, and writes them to a
@ -72,7 +72,7 @@ func (w *ChunkWriter) CloseWithError(err error) {
// Write implements io.Writer.
func (w *ChunkWriter) Write(data []byte) (int, error) {
if w.closed {
return 0, errors.Wrap(storeerrors.ErrLogic, "cannot write to closed ChunkWriter")
return 0, errors.Wrap(storetypes.ErrLogic, "cannot write to closed ChunkWriter")
}
nTotal := 0
for len(data) > 0 {
@ -169,15 +169,15 @@ func DrainChunks(chunks <-chan io.ReadCloser) {
// ValidRestoreHeight will check height is valid for snapshot restore or not
func ValidRestoreHeight(format uint32, height uint64) error {
if format != snapshotstypes.CurrentFormat {
return errors.Wrapf(snapshotstypes.ErrUnknownFormat, "format %v", format)
if format != snapshottypes.CurrentFormat {
return errors.Wrapf(snapshottypes.ErrUnknownFormat, "format %v", format)
}
if height == 0 {
return errors.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0")
return errors.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
}
if height > uint64(math.MaxInt64) {
return errors.Wrapf(snapshotstypes.ErrInvalidMetadata,
return errors.Wrapf(snapshottypes.ErrInvalidMetadata,
"snapshot height %v cannot exceed %v", height, int64(math.MaxInt64))
}
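ChunkWriter's job reduces to slicing a byte stream into fixed-size pieces; a stdlib-only sketch of that slicing rule, without the channel plumbing, compression, or error propagation of the real type:
package main
import "fmt"
// splitChunks is illustrative only: the real ChunkWriter streams into
// io.Pipe-backed readers rather than materializing slices.
func splitChunks(data []byte, chunkSize int) [][]byte {
var chunks [][]byte
for len(data) > 0 {
n := chunkSize
if n > len(data) {
n = len(data)
}
chunks = append(chunks, data[:n])
data = data[n:]
}
return chunks
}
func main() {
for i, c := range splitChunks([]byte("abcdefgh"), 3) {
fmt.Printf("chunk %d: %q\n", i, c) // 3 chunks: "abc", "def", "gh"
}
}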

View File

@ -9,7 +9,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/v2/snapshots"
"cosmossdk.io/store/snapshots"
)
func TestChunkWriter(t *testing.T) {

View File

@ -7,17 +7,19 @@ import (
"crypto/sha256"
"errors"
"io"
"os"
"testing"
"time"
db "github.com/cosmos/cosmos-db"
protoio "github.com/cosmos/gogoproto/io"
"github.com/stretchr/testify/require"
corestore "cosmossdk.io/core/store"
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
"cosmossdk.io/store/v2/snapshots"
snapshotstypes "cosmossdk.io/store/v2/snapshots/types"
"cosmossdk.io/store/snapshots"
snapshottypes "cosmossdk.io/store/snapshots/types"
"cosmossdk.io/store/types"
)
func checksums(slice [][]byte) [][]byte {
@ -61,7 +63,7 @@ func readChunks(chunks <-chan io.ReadCloser) [][]byte {
}
// snapshotItems serializes an array of bytes as SnapshotItem_ExtensionPayload and returns the chunks.
func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte {
func snapshotItems(items [][]byte, ext snapshottypes.ExtensionSnapshotter) [][]byte {
// copy the same parameters from the code
snapshotChunkSize := uint64(10e6)
snapshotBufferSize := int(snapshotChunkSize)
@ -73,19 +75,19 @@ func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte
zWriter, _ := zlib.NewWriterLevel(bufWriter, 7)
protoWriter := protoio.NewDelimitedWriter(zWriter)
for _, item := range items {
_ = snapshotstypes.WriteExtensionPayload(protoWriter, item)
_ = snapshottypes.WriteExtensionPayload(protoWriter, item)
}
// write extension metadata
_ = protoWriter.WriteMsg(&snapshotstypes.SnapshotItem{
Item: &snapshotstypes.SnapshotItem_Extension{
Extension: &snapshotstypes.SnapshotExtensionMeta{
_ = protoWriter.WriteMsg(&snapshottypes.SnapshotItem{
Item: &snapshottypes.SnapshotItem_Extension{
Extension: &snapshottypes.SnapshotExtensionMeta{
Name: ext.SnapshotName(),
Format: ext.SnapshotFormat(),
},
},
})
_ = ext.SnapshotExtension(0, func(payload []byte) error {
return snapshotstypes.WriteExtensionPayload(protoWriter, payload)
return snapshottypes.WriteExtensionPayload(protoWriter, payload)
})
_ = protoWriter.Close()
_ = bufWriter.Flush()
@ -104,21 +106,23 @@ func snapshotItems(items [][]byte, ext snapshots.ExtensionSnapshotter) [][]byte
return chunks
}
type mockCommitSnapshotter struct {
items [][]byte
type mockSnapshotter struct {
items [][]byte
prunedHeights map[int64]struct{}
snapshotInterval uint64
}
func (m *mockCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
) (snapshotstypes.SnapshotItem, error) {
func (m *mockSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader,
) (snapshottypes.SnapshotItem, error) {
if format == 0 {
return snapshotstypes.SnapshotItem{}, snapshotstypes.ErrUnknownFormat
return snapshottypes.SnapshotItem{}, snapshottypes.ErrUnknownFormat
}
if m.items != nil {
return snapshotstypes.SnapshotItem{}, errors.New("already has contents")
return snapshottypes.SnapshotItem{}, errors.New("already has contents")
}
var item snapshotstypes.SnapshotItem
var item snapshottypes.SnapshotItem
m.items = [][]byte{}
for {
item.Reset()
@ -126,7 +130,7 @@ func (m *mockCommitSnapshotter) Restore(
if err == io.EOF {
break
} else if err != nil {
return snapshotstypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message")
return snapshottypes.SnapshotItem{}, errorsmod.Wrap(err, "invalid protobuf message")
}
payload := item.GetExtensionPayload()
if payload == nil {
@ -138,59 +142,77 @@ func (m *mockCommitSnapshotter) Restore(
return item, nil
}
func (m *mockCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
func (m *mockSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
for _, item := range m.items {
if err := snapshotstypes.WriteExtensionPayload(protoWriter, item); err != nil {
if err := snapshottypes.WriteExtensionPayload(protoWriter, item); err != nil {
return err
}
}
return nil
}
func (m *mockCommitSnapshotter) SnapshotFormat() uint32 {
return snapshotstypes.CurrentFormat
func (m *mockSnapshotter) SnapshotFormat() uint32 {
return snapshottypes.CurrentFormat
}
func (m *mockCommitSnapshotter) SupportedFormats() []uint32 {
return []uint32{snapshotstypes.CurrentFormat}
func (m *mockSnapshotter) SupportedFormats() []uint32 {
return []uint32{snapshottypes.CurrentFormat}
}
type mockStorageSnapshotter struct{}
func (m *mockStorageSnapshotter) Restore(version uint64, chStorage <-chan *corestore.StateChanges) error {
return nil
func (m *mockSnapshotter) PruneSnapshotHeight(height int64) {
m.prunedHeights[height] = struct{}{}
}
type mockErrorCommitSnapshotter struct{}
func (m *mockSnapshotter) GetSnapshotInterval() uint64 {
return m.snapshotInterval
}
var _ snapshots.CommitSnapshotter = (*mockErrorCommitSnapshotter)(nil)
func (m *mockSnapshotter) SetSnapshotInterval(snapshotInterval uint64) {
m.snapshotInterval = snapshotInterval
}
func (m *mockErrorCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
type mockErrorSnapshotter struct{}
var _ snapshottypes.Snapshotter = (*mockErrorSnapshotter)(nil)
func (m *mockErrorSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
return errors.New("mock snapshot error")
}
func (m *mockErrorCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
) (snapshotstypes.SnapshotItem, error) {
return snapshotstypes.SnapshotItem{}, errors.New("mock restore error")
func (m *mockErrorSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader,
) (snapshottypes.SnapshotItem, error) {
return snapshottypes.SnapshotItem{}, errors.New("mock restore error")
}
func (m *mockErrorCommitSnapshotter) SnapshotFormat() uint32 {
return snapshotstypes.CurrentFormat
func (m *mockErrorSnapshotter) SnapshotFormat() uint32 {
return snapshottypes.CurrentFormat
}
func (m *mockErrorCommitSnapshotter) SupportedFormats() []uint32 {
return []uint32{snapshotstypes.CurrentFormat}
func (m *mockErrorSnapshotter) SupportedFormats() []uint32 {
return []uint32{snapshottypes.CurrentFormat}
}
func (m *mockErrorSnapshotter) PruneSnapshotHeight(height int64) {
}
func (m *mockErrorSnapshotter) GetSnapshotInterval() uint64 {
return 0
}
func (m *mockErrorSnapshotter) SetSnapshotInterval(snapshotInterval uint64) {
}
// setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1.
// The snapshot will complete when the returned closer is called.
func setupBusyManager(t *testing.T) *snapshots.Manager {
t.Helper()
store, err := snapshots.NewStore(t.TempDir())
store, err := snapshots.NewStore(db.NewMemDB(), t.TempDir())
require.NoError(t, err)
hung := newHungCommitSnapshotter()
mgr := snapshots.NewManager(store, opts, hung, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
hung := newHungSnapshotter()
hung.SetSnapshotInterval(opts.Interval)
mgr := snapshots.NewManager(store, opts, hung, nil, log.NewNopLogger())
require.Equal(t, opts.Interval, hung.snapshotInterval)
// Channel to ensure the test doesn't finish until the goroutine is done.
// Without this, there are intermittent test failures about
@ -201,6 +223,8 @@ func setupBusyManager(t *testing.T) *snapshots.Manager {
defer close(done)
_, err := mgr.Create(1)
require.NoError(t, err)
_, didPruneHeight := hung.prunedHeights[1]
require.True(t, didPruneHeight)
}()
time.Sleep(10 * time.Millisecond)
@ -213,29 +237,40 @@ func setupBusyManager(t *testing.T) *snapshots.Manager {
return mgr
}
// hungCommitSnapshotter can be used to test operations in progress. Call close to end the snapshot.
type hungCommitSnapshotter struct {
ch chan struct{}
// hungSnapshotter can be used to test operations in progress. Call close to end the snapshot.
type hungSnapshotter struct {
ch chan struct{}
prunedHeights map[int64]struct{}
snapshotInterval uint64
}
func newHungCommitSnapshotter() *hungCommitSnapshotter {
return &hungCommitSnapshotter{
ch: make(chan struct{}),
func newHungSnapshotter() *hungSnapshotter {
return &hungSnapshotter{
ch: make(chan struct{}),
prunedHeights: make(map[int64]struct{}),
}
}
func (m *hungCommitSnapshotter) Close() {
func (m *hungSnapshotter) Close() {
close(m.ch)
}
func (m *hungCommitSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
func (m *hungSnapshotter) Snapshot(height uint64, protoWriter protoio.Writer) error {
<-m.ch
return nil
}
func (m *hungCommitSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader, chStorage chan<- *corestore.StateChanges,
) (snapshotstypes.SnapshotItem, error) {
func (m *hungSnapshotter) PruneSnapshotHeight(height int64) {
m.prunedHeights[height] = struct{}{}
}
func (m *hungSnapshotter) SetSnapshotInterval(snapshotInterval uint64) {
m.snapshotInterval = snapshotInterval
}
func (m *hungSnapshotter) Restore(
height uint64, format uint32, protoReader protoio.Reader,
) (snapshottypes.SnapshotItem, error) {
panic("not implemented")
}
@ -265,16 +300,16 @@ func (s *extSnapshotter) SupportedFormats() []uint32 {
return []uint32{1}
}
func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshots.ExtensionPayloadWriter) error {
func (s *extSnapshotter) SnapshotExtension(height uint64, payloadWriter snapshottypes.ExtensionPayloadWriter) error {
for _, i := range s.state {
if err := payloadWriter(snapshotstypes.Uint64ToBigEndian(i)); err != nil {
if err := payloadWriter(types.Uint64ToBigEndian(i)); err != nil {
return err
}
}
return nil
}
func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshots.ExtensionPayloadReader) error {
func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadReader snapshottypes.ExtensionPayloadReader) error {
for {
payload, err := payloadReader()
if err == io.EOF {
@ -282,8 +317,21 @@ func (s *extSnapshotter) RestoreExtension(height uint64, format uint32, payloadR
} else if err != nil {
return err
}
s.state = append(s.state, snapshotstypes.BigEndianToUint64(payload))
s.state = append(s.state, types.BigEndianToUint64(payload))
}
// finalize restoration
return nil
}
// GetTempDir returns a writable temporary directory for the test to use.
func GetTempDir(tb testing.TB) string {
tb.Helper()
// os.MkDir() is used instead of testing.T.TempDir()
// see https://github.com/cosmos/cosmos-sdk/pull/8475 and
// https://github.com/cosmos/cosmos-sdk/pull/10341 for
// this change's rationale.
tempdir, err := os.MkdirTemp("", "")
require.NoError(tb, err)
tb.Cleanup(func() { _ = os.RemoveAll(tempdir) })
return tempdir
}

View File

@ -11,11 +11,10 @@ import (
"sort"
"sync"
corestore "cosmossdk.io/core/store"
errorsmod "cosmossdk.io/errors"
"cosmossdk.io/log"
storeerrors "cosmossdk.io/store/v2/errors"
"cosmossdk.io/store/v2/snapshots/types"
"cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
)
// Manager manages snapshot and restore operations for an app, making sure only a single
@ -32,16 +31,13 @@ import (
// 2. io.ReadCloser streams automatically propagate IO errors, and can pass arbitrary
// errors via io.Pipe.CloseWithError().
type Manager struct {
extensions map[string]ExtensionSnapshotter
extensions map[string]types.ExtensionSnapshotter
// store is the snapshot store where all completed snapshots are persisted.
store *Store
opts SnapshotOptions
// commitSnapshotter is the snapshotter for the commitment state.
commitSnapshotter CommitSnapshotter
// storageSnapshotter is the snapshotter for the storage state.
storageSnapshotter StorageSnapshotter
logger log.Logger
opts types.SnapshotOptions
// multistore is the store from which snapshots are taken.
multistore types.Snapshotter
logger log.Logger
mtx sync.Mutex
operation operation
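Point 2 of the comment above leans on a standard-library guarantee: an error passed to io.Pipe's CloseWithError is returned to the reader. A self-contained demonstration:
package main
import (
"errors"
"fmt"
"io"
)
func main() {
pr, pw := io.Pipe()
// The writer side fails; the reader observes the same error.
go pw.CloseWithError(errors.New("snapshot failed"))
_, err := io.ReadAll(pr)
fmt.Println(err) // snapshot failed
}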
@ -66,34 +62,32 @@ const (
opPrune operation = "prune"
opRestore operation = "restore"
chunkBufferSize = 4
chunkIDBufferSize = 1024
defaultStorageChannelBufferSize = 1024
chunkBufferSize = 4
chunkIDBufferSize = 1024
snapshotMaxItemSize = int(64e6) // SDK has no key/value size limit, so we set an arbitrary limit
)
var ErrOptsZeroSnapshotInterval = errors.New("snapshot-interval must not be 0")
var ErrOptsZeroSnapshotInterval = errors.New("snaphot-interval must not be 0")
// NewManager creates a new manager.
func NewManager(store *Store, opts SnapshotOptions, commitSnapshotter CommitSnapshotter, storageSnapshotter StorageSnapshotter, extensions map[string]ExtensionSnapshotter, logger log.Logger) *Manager {
func NewManager(store *Store, opts types.SnapshotOptions, multistore types.Snapshotter, extensions map[string]types.ExtensionSnapshotter, logger log.Logger) *Manager {
if extensions == nil {
extensions = map[string]ExtensionSnapshotter{}
extensions = map[string]types.ExtensionSnapshotter{}
}
return &Manager{
store: store,
opts: opts,
commitSnapshotter: commitSnapshotter,
storageSnapshotter: storageSnapshotter,
extensions: extensions,
logger: logger.With("module", "snapshot_manager"),
store: store,
opts: opts,
multistore: multistore,
extensions: extensions,
logger: logger,
}
}
// RegisterExtensions registers extension snapshotters with the manager
func (m *Manager) RegisterExtensions(extensions ...ExtensionSnapshotter) error {
func (m *Manager) RegisterExtensions(extensions ...types.ExtensionSnapshotter) error {
if m.extensions == nil {
m.extensions = make(map[string]ExtensionSnapshotter, len(extensions))
m.extensions = make(map[string]types.ExtensionSnapshotter, len(extensions))
}
for _, extension := range extensions {
name := extension.SnapshotName()
@ -118,10 +112,10 @@ func (m *Manager) begin(op operation) error {
// beginLocked begins an operation while already holding the mutex.
func (m *Manager) beginLocked(op operation) error {
if op == opNone {
return errorsmod.Wrap(storeerrors.ErrLogic, "can't begin a none operation")
return errorsmod.Wrap(storetypes.ErrLogic, "can't begin a none operation")
}
if m.operation != opNone {
return errorsmod.Wrapf(storeerrors.ErrConflict, "a %v operation is in progress", m.operation)
return errorsmod.Wrapf(storetypes.ErrConflict, "a %v operation is in progress", m.operation)
}
m.operation = op
return nil
@ -167,9 +161,11 @@ func (m *Manager) GetSnapshotBlockRetentionHeights() int64 {
// Create creates a snapshot and returns its metadata.
func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
if m == nil {
return nil, errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
return nil, errorsmod.Wrap(storetypes.ErrLogic, "no snapshot store configured")
}
defer m.multistore.PruneSnapshotHeight(int64(height))
err := m.begin(opSnapshot)
if err != nil {
return nil, err
@ -181,7 +177,7 @@ func (m *Manager) Create(height uint64) (*types.Snapshot, error) {
return nil, errorsmod.Wrap(err, "failed to examine latest snapshot")
}
if latest != nil && latest.Height >= height {
return nil, errorsmod.Wrapf(storeerrors.ErrConflict,
return nil, errorsmod.Wrapf(storetypes.ErrConflict,
"a more recent snapshot already exists at height %v", latest.Height)
}
@ -205,7 +201,7 @@ func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
}
}()
if err := m.commitSnapshotter.Snapshot(height, streamWriter); err != nil {
if err := m.multistore.Snapshot(height, streamWriter); err != nil {
streamWriter.CloseWithError(err)
return
}
@ -234,37 +230,6 @@ func (m *Manager) createSnapshot(height uint64, ch chan<- io.ReadCloser) {
}
}
// CreateMigration creates a migration snapshot and writes it to the given writer.
// It is used to migrate the state from the original store to the store/v2.
func (m *Manager) CreateMigration(height uint64, protoWriter WriteCloser) error {
if m == nil {
return errorsmod.Wrap(storeerrors.ErrLogic, "Snapshot Manager is nil")
}
err := m.begin(opSnapshot)
if err != nil {
return err
}
// m.end() will be called by the migration manager with EndMigration().
go func() {
if err := m.commitSnapshotter.Snapshot(height, protoWriter); err != nil {
protoWriter.CloseWithError(err)
return
}
_ = protoWriter.Close() // always return nil
}()
return nil
}
// EndMigration ends the migration operation.
// It will replace the current commitSnapshotter with the new one.
func (m *Manager) EndMigration(commitSnapshotter CommitSnapshotter) {
defer m.end()
m.commitSnapshotter = commitSnapshotter
}
// List lists snapshots, mirroring ABCI ListSnapshots. It can be concurrent with other operations.
func (m *Manager) List() ([]*types.Snapshot, error) {
return m.store.List()
@ -314,7 +279,7 @@ func (m *Manager) Restore(snapshot types.Snapshot) error {
return errorsmod.Wrapf(types.ErrUnknownFormat, "snapshot format %v", snapshot.Format)
}
if snapshot.Height == 0 {
return errorsmod.Wrap(storeerrors.ErrLogic, "cannot restore snapshot at height 0")
return errorsmod.Wrap(storetypes.ErrLogic, "cannot restore snapshot at height 0")
}
if snapshot.Height > uint64(math.MaxInt64) {
return errorsmod.Wrapf(types.ErrInvalidMetadata,
@ -398,20 +363,7 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
return payload.Payload, nil
}
// chStorage is the channel to pass the KV pairs to the storage snapshotter.
chStorage := make(chan *corestore.StateChanges, defaultStorageChannelBufferSize)
defer close(chStorage)
storageErrs := make(chan error, 1)
go func() {
defer close(storageErrs)
err := m.storageSnapshotter.Restore(snapshot.Height, chStorage)
if err != nil {
storageErrs <- err
}
}()
nextItem, err = m.commitSnapshotter.Restore(snapshot.Height, snapshot.Format, streamReader, chStorage)
nextItem, err = m.multistore.Restore(snapshot.Height, snapshot.Format, streamReader)
if err != nil {
return errorsmod.Wrap(err, "multistore restore")
}
@ -423,11 +375,11 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
}
metadata := nextItem.GetExtension()
if metadata == nil {
return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown snapshot item %T", nextItem.Item)
return errorsmod.Wrapf(storetypes.ErrLogic, "unknown snapshot item %T", nextItem.Item)
}
extension, ok := m.extensions[metadata.Name]
if !ok {
return errorsmod.Wrapf(storeerrors.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
return errorsmod.Wrapf(storetypes.ErrLogic, "unknown extension snapshotter %s", metadata.Name)
}
if !IsFormatSupported(extension, metadata.Format) {
return errorsmod.Wrapf(types.ErrUnknownFormat, "format %v for extension %s", metadata.Format, metadata.Name)
@ -441,12 +393,6 @@ func (m *Manager) doRestoreSnapshot(snapshot types.Snapshot, chChunks <-chan io.
return errorsmod.Wrapf(err, "extension %s don't exhausted payload stream", metadata.Name)
}
}
// wait for storage snapshotter to complete
if err := <-storageErrs; err != nil {
return errorsmod.Wrap(err, "storage snapshotter")
}
return nil
}
@ -456,11 +402,11 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
if m.operation != opRestore {
return false, errorsmod.Wrap(storeerrors.ErrLogic, "no restore operation in progress")
return false, errorsmod.Wrap(storetypes.ErrLogic, "no restore operation in progress")
}
if int(m.restoreChunkIndex) >= len(m.restoreSnapshot.Metadata.ChunkHashes) {
return false, errorsmod.Wrap(storeerrors.ErrLogic, "received unexpected chunk")
return false, errorsmod.Wrap(storetypes.ErrLogic, "received unexpected chunk")
}
// Check if any errors have occurred yet.
@ -470,7 +416,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
if done.err != nil {
return false, done.err
}
return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended unexpectedly")
return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended unexpectedly")
default:
}
@ -506,7 +452,7 @@ func (m *Manager) RestoreChunk(chunk []byte) (bool, error) {
return false, done.err
}
if !done.complete {
return false, errorsmod.Wrap(storeerrors.ErrLogic, "restore ended prematurely")
return false, errorsmod.Wrap(storetypes.ErrLogic, "restore ended prematurely")
}
return true, nil
@ -549,7 +495,7 @@ func (m *Manager) sortedExtensionNames() []string {
}
// IsFormatSupported returns if the snapshotter supports restoration from given format.
func IsFormatSupported(snapshotter ExtensionSnapshotter, format uint32) bool {
func IsFormatSupported(snapshotter types.ExtensionSnapshotter, format uint32) bool {
for _, i := range snapshotter.SupportedFormats() {
if i == format {
return true
@ -607,4 +553,6 @@ func (m *Manager) snapshot(height int64) {
}
// Close the snapshot database.
func (m *Manager) Close() error { return nil }
func (m *Manager) Close() error {
return m.store.db.Close()
}

View File

@ -4,21 +4,23 @@ import (
"errors"
"testing"
db "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/log"
"cosmossdk.io/store/v2/snapshots"
"cosmossdk.io/store/v2/snapshots/types"
"cosmossdk.io/store/snapshots"
"cosmossdk.io/store/snapshots/types"
)
var opts = snapshots.NewSnapshotOptions(1500, 2)
var opts = types.NewSnapshotOptions(1500, 2)
func TestManager_List(t *testing.T) {
store := setupStore(t)
commitSnapshotter := &mockCommitSnapshotter{}
storageSnapshotter := &mockStorageSnapshotter{}
manager := snapshots.NewManager(store, opts, commitSnapshotter, storageSnapshotter, nil, log.NewNopLogger())
snapshotter := &mockSnapshotter{}
snapshotter.SetSnapshotInterval(opts.Interval)
manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger())
require.Equal(t, opts.Interval, snapshotter.GetSnapshotInterval())
mgrList, err := manager.List()
require.NoError(t, err)
@ -39,7 +41,7 @@ func TestManager_List(t *testing.T) {
func TestManager_LoadChunk(t *testing.T) {
store := setupStore(t)
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
manager := snapshots.NewManager(store, opts, &mockSnapshotter{}, nil, log.NewNopLogger())
// Existing chunk should return body
chunk, err := manager.LoadChunk(2, 1, 1)
@ -65,13 +67,14 @@ func TestManager_Take(t *testing.T) {
{4, 5, 6},
{7, 8, 9},
}
commitSnapshotter := &mockCommitSnapshotter{
items: items,
snapshotter := &mockSnapshotter{
items: items,
prunedHeights: make(map[int64]struct{}),
}
extSnapshotter := newExtSnapshotter(10)
expectChunks := snapshotItems(items, extSnapshotter)
manager := snapshots.NewManager(store, opts, commitSnapshotter, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)
@ -82,14 +85,18 @@ func TestManager_Take(t *testing.T) {
// creating a snapshot at a lower height than the latest should error
_, err = manager.Create(3)
require.Error(t, err)
_, didPruneHeight := snapshotter.prunedHeights[3]
require.True(t, didPruneHeight)
// creating a snapshot at a higher height should be fine, and should return it
snapshot, err := manager.Create(5)
require.NoError(t, err)
_, didPruneHeight = snapshotter.prunedHeights[5]
require.True(t, didPruneHeight)
assert.Equal(t, &types.Snapshot{
Height: 5,
Format: commitSnapshotter.SnapshotFormat(),
Format: snapshotter.SnapshotFormat(),
Chunks: 1,
Hash: []uint8{0xc5, 0xf7, 0xfe, 0xea, 0xd3, 0x4d, 0x3e, 0x87, 0xff, 0x41, 0xa2, 0x27, 0xfa, 0xcb, 0x38, 0x17, 0xa, 0x5, 0xeb, 0x27, 0x4e, 0x16, 0x5e, 0xf3, 0xb2, 0x8b, 0x47, 0xd1, 0xe6, 0x94, 0x7e, 0x8b},
Metadata: types.Metadata{
@ -110,7 +117,9 @@ func TestManager_Take(t *testing.T) {
func TestManager_Prune(t *testing.T) {
store := setupStore(t)
manager := snapshots.NewManager(store, opts, &mockCommitSnapshotter{}, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
snapshotter := &mockSnapshotter{}
snapshotter.SetSnapshotInterval(opts.Interval)
manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger())
pruned, err := manager.Prune(2)
require.NoError(t, err)
@ -128,9 +137,11 @@ func TestManager_Prune(t *testing.T) {
func TestManager_Restore(t *testing.T) {
store := setupStore(t)
target := &mockCommitSnapshotter{}
target := &mockSnapshotter{
prunedHeights: make(map[int64]struct{}),
}
extSnapshotter := newExtSnapshotter(0)
manager := snapshots.NewManager(store, opts, target, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
manager := snapshots.NewManager(store, opts, target, nil, log.NewNopLogger())
err := manager.RegisterExtensions(extSnapshotter)
require.NoError(t, err)
@ -180,6 +191,8 @@ func TestManager_Restore(t *testing.T) {
// While the restore is in progress, any other operations fail
_, err = manager.Create(4)
require.Error(t, err)
_, didPruneHeight := target.prunedHeights[4]
require.True(t, didPruneHeight)
_, err = manager.Prune(1)
require.Error(t, err)
@ -232,24 +245,13 @@ func TestManager_Restore(t *testing.T) {
Metadata: types.Metadata{ChunkHashes: checksums(chunks)},
})
require.NoError(t, err)
// Feeding the chunks should work
for i, chunk := range chunks {
done, err := manager.RestoreChunk(chunk)
require.NoError(t, err)
if i == len(chunks)-1 {
assert.True(t, done)
} else {
assert.False(t, done)
}
}
}
func TestManager_TakeError(t *testing.T) {
snapshotter := &mockErrorCommitSnapshotter{}
store, err := snapshots.NewStore(t.TempDir())
snapshotter := &mockErrorSnapshotter{}
store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t))
require.NoError(t, err)
manager := snapshots.NewManager(store, opts, snapshotter, &mockStorageSnapshotter{}, nil, log.NewNopLogger())
manager := snapshots.NewManager(store, opts, snapshotter, nil, log.NewNopLogger())
_, err = manager.Create(1)
require.Error(t, err)

View File

@ -3,23 +3,20 @@ package snapshots
import (
"crypto/sha256"
"encoding/binary"
"fmt"
"hash"
"io"
"math"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
db "github.com/cosmos/cosmos-db"
"github.com/cosmos/gogoproto/proto"
"cosmossdk.io/errors"
"cosmossdk.io/store/v2"
storeerrors "cosmossdk.io/store/v2/errors"
"cosmossdk.io/store/v2/snapshots/types"
"cosmossdk.io/store/snapshots/types"
storetypes "cosmossdk.io/store/types"
)
const (
@ -29,6 +26,7 @@ const (
// Store is a snapshot store, containing snapshot metadata and binary chunks.
type Store struct {
db db.DB
dir string
mtx sync.Mutex
@ -36,20 +34,17 @@ type Store struct {
}
// NewStore creates a new snapshot store.
func NewStore(dir string) (*Store, error) {
func NewStore(db db.DB, dir string) (*Store, error) {
if dir == "" {
return nil, errors.Wrap(storeerrors.ErrLogic, "snapshot directory not given")
return nil, errors.Wrap(storetypes.ErrLogic, "snapshot directory not given")
}
err := os.MkdirAll(dir, 0o755)
if err != nil {
return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir)
}
err = os.MkdirAll(filepath.Join(dir, "metadata"), 0o750)
if err != nil {
return nil, errors.Wrapf(err, "failed to create snapshot metadata directory %q", dir)
}
return &Store{
db: db,
dir: dir,
saving: make(map[uint64]bool),
}, nil
@ -61,28 +56,29 @@ func (s *Store) Delete(height uint64, format uint32) error {
saving := s.saving[height]
s.mtx.Unlock()
if saving {
return errors.Wrapf(storeerrors.ErrConflict,
return errors.Wrapf(storetypes.ErrConflict,
"snapshot for height %v format %v is currently being saved", height, format)
}
if err := os.RemoveAll(s.pathSnapshot(height, format)); err != nil {
return errors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v", height, format)
err := s.db.DeleteSync(encodeKey(height, format))
if err != nil {
return errors.Wrapf(err, "failed to delete snapshot for height %v format %v",
height, format)
}
if err := os.RemoveAll(s.pathMetadata(height, format)); err != nil {
return errors.Wrapf(err, "failed to delete snapshot metadata for height %v format %v", height, format)
}
return nil
err = os.RemoveAll(s.pathSnapshot(height, format))
return errors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v",
height, format)
}
// Get fetches snapshot info from the database.
func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) {
if _, err := os.Stat(s.pathMetadata(height, format)); os.IsNotExist(err) {
return nil, nil
}
bytes, err := os.ReadFile(s.pathMetadata(height, format))
bytes, err := s.db.Get(encodeKey(height, format))
if err != nil {
return nil, errors.Wrapf(err, "failed to fetch snapshot metadata for height %v format %v",
height, format)
}
if bytes == nil {
return nil, nil
}
snapshot := &types.Snapshot{}
err = proto.Unmarshal(bytes, snapshot)
if err != nil {
@ -95,62 +91,44 @@ func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) {
return snapshot, nil
}
// GetLatest fetches the latest snapshot from the database, if any.
func (s *Store) GetLatest() (*types.Snapshot, error) {
metadata, err := os.ReadDir(s.pathMetadataDir())
iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32))
if err != nil {
return nil, errors.Wrap(err, "failed to list snapshot metadata")
return nil, errors.Wrap(err, "failed to find latest snapshot")
}
if len(metadata) == 0 {
return nil, nil
}
// file system may not guarantee the order of the files, so we sort them lexically
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Name() < metadata[j].Name() })
defer iter.Close()
path := filepath.Join(s.pathMetadataDir(), metadata[len(metadata)-1].Name())
if err := s.validateMetadataPath(path); err != nil {
return nil, err
var snapshot *types.Snapshot
if iter.Valid() {
snapshot = &types.Snapshot{}
err := proto.Unmarshal(iter.Value(), snapshot)
if err != nil {
return nil, errors.Wrap(err, "failed to decode latest snapshot")
}
}
bz, err := os.ReadFile(path)
if err != nil {
return nil, errors.Wrapf(err, "failed to read latest snapshot metadata %s", path)
}
snapshot := &types.Snapshot{}
err = proto.Unmarshal(bz, snapshot)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode latest snapshot metadata %s", path)
}
return snapshot, nil
err = iter.Error()
return snapshot, errors.Wrap(err, "failed to find latest snapshot")
}
// List lists snapshots, in reverse order (newest first).
func (s *Store) List() ([]*types.Snapshot, error) {
metadata, err := os.ReadDir(s.pathMetadataDir())
iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32))
if err != nil {
return nil, errors.Wrap(err, "failed to list snapshot metadata")
return nil, errors.Wrap(err, "failed to list snapshots")
}
// file system may not guarantee the order of the files, so we sort them lexically
sort.Slice(metadata, func(i, j int) bool { return metadata[i].Name() < metadata[j].Name() })
defer iter.Close()
snapshots := make([]*types.Snapshot, len(metadata))
for i, entry := range metadata {
path := filepath.Join(s.pathMetadataDir(), entry.Name())
if err := s.validateMetadataPath(path); err != nil {
return nil, err
}
bz, err := os.ReadFile(path)
if err != nil {
return nil, errors.Wrapf(err, "failed to read snapshot metadata %s", entry.Name())
}
snapshots := make([]*types.Snapshot, 0)
for ; iter.Valid(); iter.Next() {
snapshot := &types.Snapshot{}
err = proto.Unmarshal(bz, snapshot)
err := proto.Unmarshal(iter.Value(), snapshot)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode snapshot metadata %s", entry.Name())
return nil, errors.Wrap(err, "failed to decode snapshot info")
}
snapshots[len(metadata)-1-i] = snapshot
snapshots = append(snapshots, snapshot)
}
return snapshots, nil
return snapshots, iter.Error()
}
// Load loads a snapshot (both metadata and binary chunks). The chunks must be consumed and closed.
@ -205,20 +183,20 @@ func (s *Store) loadChunkFile(height uint64, format, chunk uint32) (io.ReadClose
// Prune removes old snapshots. The given number of most recent heights (regardless of format) are retained.
func (s *Store) Prune(retain uint32) (uint64, error) {
metadata, err := os.ReadDir(s.pathMetadataDir())
iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32))
if err != nil {
return 0, errors.Wrap(err, "failed to list snapshot metadata")
return 0, errors.Wrap(err, "failed to prune snapshots")
}
defer iter.Close()
pruned := uint64(0)
prunedHeights := make(map[uint64]bool)
skip := make(map[uint64]bool)
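// Iterating newest-first, every format of the `retain` most recent heights is
// kept (tracked via the skip set) and everything older is deleted.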
for i := len(metadata) - 1; i >= 0; i-- {
height, format, err := s.parseMetadataFilename(metadata[i].Name())
for ; iter.Valid(); iter.Next() {
height, format, err := decodeKey(iter.Key())
if err != nil {
return 0, err
return 0, errors.Wrap(err, "failed to prune snapshots")
}
if skip[height] || uint32(len(skip)) < retain {
skip[height] = true
continue
@ -240,7 +218,7 @@ func (s *Store) Prune(retain uint32) (uint64, error) {
}
}
}
return pruned, nil
return pruned, iter.Error()
}
// Save saves a snapshot to disk, returning it.
@ -249,7 +227,7 @@ func (s *Store) Save(
) (*types.Snapshot, error) {
defer DrainChunks(chunks)
if height == 0 {
return nil, errors.Wrap(storeerrors.ErrLogic, "snapshot height cannot be 0")
return nil, errors.Wrap(storetypes.ErrLogic, "snapshot height cannot be 0")
}
s.mtx.Lock()
@ -257,7 +235,7 @@ func (s *Store) Save(
s.saving[height] = true
s.mtx.Unlock()
if saving {
return nil, errors.Wrapf(storeerrors.ErrConflict,
return nil, errors.Wrapf(storetypes.ErrConflict,
"a snapshot for height %v is already being saved", height)
}
defer func() {
@ -266,24 +244,37 @@ func (s *Store) Save(
s.mtx.Unlock()
}()
exists, err := s.db.Has(encodeKey(height, format))
if err != nil {
return nil, err
}
if exists {
return nil, errors.Wrapf(storetypes.ErrConflict,
"snapshot already exists for height %v format %v", height, format)
}
snapshot := &types.Snapshot{
Height: height,
Format: format,
}
// create height directory or do nothing
if err := os.MkdirAll(s.pathHeight(height), 0o750); err != nil {
return nil, errors.Wrapf(err, "failed to create snapshot directory for height %v", height)
}
// create format directory or fail (if for example the format directory already exists)
if err := os.Mkdir(s.pathSnapshot(height, format), 0o750); err != nil {
return nil, errors.Wrapf(err, "failed to create snapshot directory for height %v format %v", height, format)
}
dirCreated := false
index := uint32(0)
snapshotHasher := sha256.New()
chunkHasher := sha256.New()
for chunkBody := range chunks {
// Only create the snapshot directory on encountering the first chunk.
// If the directory disappears during chunk saving,
// the whole operation will fail anyway.
if !dirCreated {
dir := s.pathSnapshot(height, format)
if err := os.MkdirAll(dir, 0o755); err != nil {
return nil, errors.Wrapf(err, "failed to create snapshot directory %q", dir)
}
dirCreated = true
}
if err := s.saveChunk(chunkBody, index, snapshot, chunkHasher, snapshotHasher); err != nil {
return nil, err
}
@ -336,11 +327,8 @@ func (s *Store) saveSnapshot(snapshot *types.Snapshot) error {
if err != nil {
return errors.Wrap(err, "failed to encode snapshot metadata")
}
err = os.WriteFile(s.pathMetadata(snapshot.Height, snapshot.Format), value, 0o600)
if err != nil {
return errors.Wrap(err, "failed to write snapshot metadata")
}
return nil
err = s.db.SetSync(encodeKey(snapshot.Height, snapshot.Format), value)
return errors.Wrap(err, "failed to store snapshot")
}
// pathHeight generates the path to a height, containing multiple snapshot formats.
@ -353,87 +341,29 @@ func (s *Store) pathSnapshot(height uint64, format uint32) string {
return filepath.Join(s.pathHeight(height), strconv.FormatUint(uint64(format), 10))
}
func (s *Store) pathMetadataDir() string {
return filepath.Join(s.dir, "metadata")
}
// pathMetadata generates a snapshot metadata path.
func (s *Store) pathMetadata(height uint64, format uint32) string {
return filepath.Join(s.pathMetadataDir(), fmt.Sprintf("%020d-%08d", height, format))
}
// PathChunk generates a snapshot chunk path.
func (s *Store) PathChunk(height uint64, format, chunk uint32) string {
return filepath.Join(s.pathSnapshot(height, format), strconv.FormatUint(uint64(chunk), 10))
}
func (s *Store) parseMetadataFilename(filename string) (height uint64, format uint32, err error) {
parts := strings.Split(filename, "-")
if len(parts) != 2 {
return 0, 0, fmt.Errorf("invalid snapshot metadata filename %s", filename)
}
height, err = strconv.ParseUint(parts[0], 10, 64)
if err != nil {
return 0, 0, errors.Wrapf(err, "invalid snapshot metadata filename %s", filename)
}
var f uint64
f, err = strconv.ParseUint(parts[1], 10, 32)
if err != nil {
return 0, 0, errors.Wrapf(err, "invalid snapshot metadata filename %s", filename)
}
format = uint32(f)
if filename != filepath.Base(s.pathMetadata(height, format)) {
return 0, 0, fmt.Errorf("invalid snapshot metadata filename %s", filename)
}
return height, format, nil
}
func (s *Store) validateMetadataPath(path string) error {
dir, f := filepath.Split(path)
if dir != fmt.Sprintf("%s/", s.pathMetadataDir()) {
return fmt.Errorf("invalid snapshot metadata path %s", path)
}
_, _, err := s.parseMetadataFilename(f)
return err
}
// legacyV1DecodeKey decodes a legacy snapshot key used in a raw kv store.
func legacyV1DecodeKey(k []byte) (uint64, uint32, error) {
// decodeKey decodes a snapshot key.
func decodeKey(k []byte) (uint64, uint32, error) {
if len(k) != 13 {
return 0, 0, errors.Wrapf(storeerrors.ErrLogic, "invalid snapshot key with length %v", len(k))
return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key with length %v", len(k))
}
if k[0] != keyPrefixSnapshot {
return 0, 0, errors.Wrapf(storeerrors.ErrLogic, "invalid snapshot key prefix %x", k[0])
return 0, 0, errors.Wrapf(storetypes.ErrLogic, "invalid snapshot key prefix %x", k[0])
}
height := binary.BigEndian.Uint64(k[1:9])
format := binary.BigEndian.Uint32(k[9:13])
return height, format, nil
}
// legacyV1EncodeKey encodes a snapshot key for use in a raw kv store.
func legacyV1EncodeKey(height uint64, format uint32) []byte {
// encodeKey encodes a snapshot key.
func encodeKey(height uint64, format uint32) []byte {
k := make([]byte, 13)
k[0] = keyPrefixSnapshot
binary.BigEndian.PutUint64(k[1:], height)
binary.BigEndian.PutUint32(k[9:], format)
return k
}
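// The encoded key is 13 bytes: prefix (1 byte) | height (8 bytes, big-endian) |
// format (4 bytes, big-endian). Big-endian ordering means a reverse iteration
// over the full key range visits the highest heights first, which GetLatest,
// List, and Prune rely on.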
func (s *Store) MigrateFromV1(db store.RawDB) error {
itr, err := db.Iterator(legacyV1EncodeKey(0, 0), legacyV1EncodeKey(math.MaxUint64, math.MaxUint32))
if err != nil {
return err
}
defer itr.Close()
for ; itr.Valid(); itr.Next() {
height, format, err := legacyV1DecodeKey(itr.Key())
if err != nil {
return err
}
if err := os.WriteFile(s.pathMetadata(height, format), itr.Value(), 0o600); err != nil {
return errors.Wrapf(err, "failed to write snapshot metadata %q", s.pathMetadata(height, format))
}
}
return nil
}

View File

@ -4,20 +4,20 @@ import (
"bytes"
"errors"
"io"
"sync"
"testing"
"time"
db "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/v2/snapshots"
"cosmossdk.io/store/v2/snapshots/types"
"cosmossdk.io/store/snapshots"
"cosmossdk.io/store/snapshots/types"
)
func setupStore(t *testing.T) *snapshots.Store {
t.Helper()
store, err := snapshots.NewStore(t.TempDir())
store, err := snapshots.NewStore(db.NewMemDB(), GetTempDir(t))
require.NoError(t, err)
_, err = store.Save(1, 1, makeChunks([][]byte{
@ -41,13 +41,14 @@ func setupStore(t *testing.T) *snapshots.Store {
}
func TestNewStore(t *testing.T) {
_, err := snapshots.NewStore(t.TempDir())
tempdir := GetTempDir(t)
_, err := snapshots.NewStore(db.NewMemDB(), tempdir)
require.NoError(t, err)
}
func TestNewStore_ErrNoDir(t *testing.T) {
_, err := snapshots.NewStore("")
_, err := snapshots.NewStore(db.NewMemDB(), "")
require.Error(t, err)
}
@ -262,7 +263,6 @@ func TestStore_Prune(t *testing.T) {
}
func TestStore_Save(t *testing.T) {
t.Parallel()
store := setupStore(t)
// Saving a snapshot should work
snapshot, err := store.Save(4, 1, makeChunks([][]byte{{1}, {2}}))
@ -319,39 +319,15 @@ func TestStore_Save(t *testing.T) {
// Saving a snapshot should error if a snapshot is already in progress for the same height,
// regardless of format. However, a different height should succeed.
var (
wgStart, wgDone sync.WaitGroup
mu sync.Mutex
gotErrHeights []uint64
)
srcHeights := []uint64{7, 7, 7, 8, 9}
wgStart.Add(len(srcHeights))
wgDone.Add(len(srcHeights))
for _, h := range srcHeights {
ch = make(chan io.ReadCloser, 1)
ch <- &ReadCloserMock{} // does not block on a buffered channel
close(ch)
go func(height uint64) {
wgStart.Done()
wgStart.Wait() // wait for all routines started
if _, err = store.Save(height, 1, ch); err != nil {
mu.Lock()
gotErrHeights = append(gotErrHeights, height)
mu.Unlock()
}
wgDone.Done()
}(h)
}
wgDone.Wait() // wait for all routines completed
assert.Equal(t, []uint64{7, 7}, gotErrHeights)
}
type ReadCloserMock struct{}
func (r ReadCloserMock) Read(p []byte) (n int, err error) {
return len(p), io.EOF
}
func (r ReadCloserMock) Close() error {
return nil
ch = make(chan io.ReadCloser)
go func() {
_, err := store.Save(7, 1, ch)
require.NoError(t, err)
}()
time.Sleep(10 * time.Millisecond)
_, err = store.Save(7, 2, makeChunks(nil))
require.Error(t, err)
_, err = store.Save(8, 1, makeChunks(nil))
require.NoError(t, err)
close(ch)
}

View File

@ -19,13 +19,6 @@ const (
snapshotCompressionLevel = 7
)
type WriteCloser interface {
protoio.WriteCloser
// CloseWithError closes the writer and sends an error to the reader.
CloseWithError(err error)
}
// StreamWriter set up a stream pipeline to serialize snapshot nodes:
// Exported Items -> delimited Protobuf -> zlib -> buffer -> chunkWriter -> chan io.ReadCloser
type StreamWriter struct {

View File

@ -2,7 +2,7 @@ package types
import (
abci "github.com/cometbft/cometbft/abci/types"
"github.com/cosmos/gogoproto/proto"
proto "github.com/cosmos/gogoproto/proto"
"cosmossdk.io/errors"
)

View File

@ -0,0 +1,18 @@
package types
// SnapshotOptions defines the snapshot strategy used when determining which
// heights are snapshotted for state sync.
type SnapshotOptions struct {
// Interval defines the block-height interval at which snapshots are taken.
Interval uint64
// KeepRecent defines how many recent snapshots to keep.
KeepRecent uint32
}
func NewSnapshotOptions(interval uint64, keepRecent uint32) SnapshotOptions {
return SnapshotOptions{
Interval: interval,
KeepRecent: keepRecent,
}
}
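// Example (illustrative): take a snapshot every 1000 blocks and keep the two
// most recent snapshots; the concrete values are application-specific.
//
//	opts := NewSnapshotOptions(1000, 2)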

View File

@ -0,0 +1,56 @@
package types
import (
protoio "github.com/cosmos/gogoproto/io"
)
// Snapshotter is something that can create and restore snapshots, consisting of streamed binary
// chunks - all of which must be read from the channel and closed. If an unsupported format is
// given, it must return ErrUnknownFormat (possibly wrapped with fmt.Errorf).
type Snapshotter interface {
// Snapshot writes snapshot items into the protobuf writer.
Snapshot(height uint64, protoWriter protoio.Writer) error
// PruneSnapshotHeight prunes the given height according to the prune strategy.
// If the strategy is PruneNothing, this is a no-op.
// For any other strategy, the height is retained until it is
// less than <current height> - KeepRecent and <current height> % Interval == 0.
PruneSnapshotHeight(height int64)
// SetSnapshotInterval sets the interval at which the snapshots are taken.
// It is used by the store that implements the Snapshotter interface
// to determine which heights to retain until after the snapshot is complete.
SetSnapshotInterval(snapshotInterval uint64)
// Restore restores a state snapshot, taking the reader of protobuf message stream as input.
Restore(height uint64, format uint32, protoReader protoio.Reader) (SnapshotItem, error)
}
// ExtensionPayloadReader reads extension payloads; it returns io.EOF when it
// reaches either the end of the stream or the extension boundary.
type ExtensionPayloadReader = func() ([]byte, error)
// ExtensionPayloadWriter is a helper to write extension payloads to the underlying stream.
type ExtensionPayloadWriter = func([]byte) error
// ExtensionSnapshotter is an extension Snapshotter that is appended to the snapshot stream.
// ExtensionSnapshotter has a unique name and manages its own internal formats.
type ExtensionSnapshotter interface {
// SnapshotName returns the name of snapshotter, it should be unique in the manager.
SnapshotName() string
// SnapshotFormat returns the default format the extension snapshotter uses to encode the
// payloads when taking a snapshot.
// It's defined within the extension, different from the global format for the whole state-sync snapshot.
SnapshotFormat() uint32
// SupportedFormats returns a list of formats it can restore from.
SupportedFormats() []uint32
// SnapshotExtension writes extension payloads into the underlying protobuf stream.
SnapshotExtension(height uint64, payloadWriter ExtensionPayloadWriter) error
// RestoreExtension restores an extension state snapshot,
// the payload reader returns `io.EOF` when it reaches the extension boundary.
RestoreExtension(height uint64, format uint32, payloadReader ExtensionPayloadReader) error
}

View File

@ -1,8 +1,6 @@
package types
import (
"encoding/binary"
protoio "github.com/cosmos/gogoproto/io"
)
@ -16,20 +14,3 @@ func WriteExtensionPayload(protoWriter protoio.Writer, payload []byte) error {
},
})
}
// Uint64ToBigEndian - marshals uint64 to a big endian byte slice so it can be sorted
func Uint64ToBigEndian(i uint64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, i)
return b
}
// BigEndianToUint64 returns an uint64 from big endian encoded bytes. If encoding
// is empty, zero is returned.
func BigEndianToUint64(bz []byte) uint64 {
if len(bz) == 0 {
return 0
}
return binary.BigEndian.Uint64(bz)
}

View File

@ -1,107 +1,19 @@
package store
import (
"io"
dbm "github.com/cosmos/cosmos-db"
coreheader "cosmossdk.io/core/header"
corestore "cosmossdk.io/core/store"
"cosmossdk.io/store/v2/metrics"
"cosmossdk.io/store/v2/proof"
"cosmossdk.io/log"
"cosmossdk.io/store/cache"
"cosmossdk.io/store/metrics"
"cosmossdk.io/store/rootmulti"
"cosmossdk.io/store/types"
)
// RootStore defines an abstraction layer containing a State Storage (SS) engine
// and one or more State Commitment (SC) engines.
type RootStore interface {
// StateLatest returns a read-only version of the RootStore at the latest
// height, alongside the associated version.
StateLatest() (uint64, corestore.ReaderMap, error)
// StateAt is analogous to StateLatest() except it returns a read-only version
// of the RootStore at the provided version. If such a version cannot be found,
// an error must be returned.
StateAt(version uint64) (corestore.ReaderMap, error)
// GetStateStorage returns the SS backend.
GetStateStorage() VersionedDatabase
// GetStateCommitment returns the SC backend.
GetStateCommitment() Committer
// Query performs a query on the RootStore for a given store key, version (height),
// and key tuple. Queries should be routed to the underlying SS engine.
Query(storeKey []byte, version uint64, key []byte, prove bool) (QueryResult, error)
// LoadVersion loads the RootStore to the given version.
LoadVersion(version uint64) error
// LoadLatestVersion behaves identically to LoadVersion except it loads the
// latest version implicitly.
LoadLatestVersion() error
// GetLatestVersion returns the latest version, i.e. height, committed.
GetLatestVersion() (uint64, error)
// SetInitialVersion sets the initial version on the RootStore.
SetInitialVersion(v uint64) error
// SetCommitHeader sets the commit header for the next commit. This call and
// implementation is optional. However, it must be supported in cases where
// queries based on block time need to be supported.
SetCommitHeader(h *coreheader.Info)
// WorkingHash returns the current WIP commitment hash by applying the Changeset
// to the SC backend. Typically, WorkingHash() is called prior to Commit() and
// must be applied with the exact same Changeset. This is because WorkingHash()
// is responsible for writing the Changeset to the SC backend and returning the
// resulting root hash. Then, Commit() would return this hash and flush writes
// to disk.
WorkingHash(cs *corestore.Changeset) ([]byte, error)
// Commit should be responsible for taking the provided changeset and flushing
// it to disk. Note, depending on the implementation, the changeset, at this
// point, may already be written to the SC backends. Commit() should ensure
// the changeset is committed to all SS and SC backends and flushed to disk.
// It must return a hash of the merkle-ized committed state. This hash should
// be the same as the hash returned by WorkingHash() prior to calling Commit().
Commit(cs *corestore.Changeset) ([]byte, error)
// LastCommitID returns a CommitID pertaining to the last commitment.
LastCommitID() (proof.CommitID, error)
// Prune prunes the RootStore to the provided version. It is used to remove
// old versions of the RootStore by the CLI.
Prune(version uint64) error
// StartMigration starts a migration process to migrate the RootStore/v1 to the
// SS and SC backends of store/v2.
// It runs in a separate goroutine and replaces the current RootStore with the
// migrated new backends once the migration is complete.
StartMigration() error
// SetMetrics sets the telemetry handler on the RootStore.
SetMetrics(m metrics.Metrics)
io.Closer
func NewCommitMultiStore(db dbm.DB, logger log.Logger, metricGatherer metrics.StoreMetrics) types.CommitMultiStore {
return rootmulti.NewStore(db, logger, metricGatherer)
}
// UpgradeableRootStore extends the RootStore interface to support loading versions
// with upgrades.
type UpgradeableRootStore interface {
RootStore
// LoadVersionAndUpgrade behaves identically to LoadVersion except it also
// accepts a StoreUpgrades object that defines a series of transformations to
// apply to store keys (if any).
//
// Note, handling StoreUpgrades is optional depending on the underlying RootStore
// implementation.
LoadVersionAndUpgrade(version uint64, upgrades *corestore.StoreUpgrades) error
}
// QueryResult defines the response type to performing a query on a RootStore.
type QueryResult struct {
Key []byte
Value []byte
Version uint64
ProofOps []proof.CommitmentOp
func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache {
return cache.NewCommitKVStoreCacheManager(cache.DefaultCommitKVStoreCacheSize)
}
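// Example (illustrative): construct a v1 CommitMultiStore backed by an
// in-memory DB with no-op metrics:
//
//	cms := NewCommitMultiStore(dbm.NewMemDB(), log.NewNopLogger(), metrics.NewNoOpMetrics())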

store/streaming/README.md Normal file
View File

@ -0,0 +1,30 @@
# Cosmos-SDK Plugins
This package contains an extensible plugin system for the Cosmos-SDK. The plugin system leverages the [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin) system. This system is designed to work over RPC.
Although `go-plugin` is built to work over RPC, it is currently designed to work only over a local network.
## Prerequisites
For an overview of the features supported by the `go-plugin` system, please see https://github.com/hashicorp/go-plugin. The `go-plugin` documentation is located [here](https://github.com/hashicorp/go-plugin/tree/master/docs). You can also directly visit any of the links below:
* [Writing plugins without Go](https://github.com/hashicorp/go-plugin/blob/master/docs/guide-plugin-write-non-go.md)
* [Go Plugin Tutorial](https://github.com/hashicorp/go-plugin/blob/master/docs/extensive-go-plugin-tutorial.md)
* [Plugin Internals](https://github.com/hashicorp/go-plugin/blob/master/docs/internals.md)
* [Plugin Architecture](https://www.youtube.com/watch?v=SRvm3zQQc1Q) (start here)
## Exposing plugins
To expose plugins to the plugin system, you will need to:
1. Implement the gRPC message protocol service of the plugin
2. Build the plugin binary
3. Export it
Read the plugin documentation in the [Streaming Plugins](#streaming-plugins) section for examples on how to build a plugin.
## Streaming Plugins
List of supported streaming plugins:
* [ABCI State Streaming Plugin](abci/README.md)

View File

@ -0,0 +1,210 @@
# ABCI and State Streaming Plugin (gRPC)
The `BaseApp` package contains the interface for an [ABCIListener](https://github.com/cosmos/cosmos-sdk/blob/main/baseapp/streaming.go)
service used to write state changes out from individual KVStores to external systems,
as described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-038-state-listening.md).
Specific `ABCIListener` service implementations are written and loaded as [hashicorp/go-plugin](https://github.com/hashicorp/go-plugin).
## Implementation
In this section we describe the implementation of the `ABCIListener` interface as a gRPC service.
### Service Protocol
The companion service protocol for the `ABCIListener` interface is described below.
See [proto/cosmos/store/streaming/abci/grpc.proto](https://github.com/cosmos/cosmos-sdk/blob/main/proto/cosmos/store/streaming/abci/grpc.proto) for full details.
```protobuf reference
https://github.com/cosmos/cosmos-sdk/blob/6cee22df52eb0cbb30e351fbb41f66d26c1f8300/proto/cosmos/store/streaming/abci/grpc.proto#L1-L36
```
### Generating the Code
To generate the stubs the local client implementation can call, run the following command:
```shell
make proto-gen
```
For other languages you'll need to [download](https://github.com/cosmos/cosmos-sdk/blob/main/third_party/proto/README.md)
the Cosmos SDK protos into your project and compile them. For language-specific compilation instructions, visit
[https://github.com/grpc](https://github.com/grpc) and look in the `examples` folder of your
language of choice (`https://github.com/grpc/grpc-{language}/tree/master/examples`), and see [https://grpc.io](https://grpc.io)
for the documentation.
### gRPC Client and Server
Implementing the ABCIListener gRPC client and server is a straightforward process.
To create the client and server we create a `ListenerGRPCPlugin` struct that implements the
`plugin.GRPCPlugin` interface and an `Impl` property that will contain a concrete implementation
of the `ABCIListener` plugin written in Go.
#### The Interface
The `BaseApp` `ABCIListener` interface defines the plugin's capabilities.
Boilerplate RPC implementation example of the `ABCIListener` interface. ([store/streaming/abci/grpc.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/abci/grpc.go))
```go reference
https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/grpc.go#L13-L79
```
Our `ABCIListener` service plugin. ([store/streaming/abci/interface.go](interface.go))
```go reference
https://github.com/cosmos/cosmos-sdk/blob/f851e188b3b9d46e7c63fa514ad137e6d558fdd9/store/streaming/abci/interface.go#L13-L45
```
#### Plugin Implementation
Plugin implementations can live in a completely separate package, but they will need access
to the `ABCIListener` interface. Note that plugin implementations
defined in the `ListenerGRPCPlugin.Impl` property are **only** required when building
plugins in Go; they are pre-compiled into Go modules. The `GRPCServer.Impl` calls methods
on this out-of-process plugin.
For Go plugins this is all that is required to process data sent over gRPC.
This provides the advantage of writing quick plugins that forward data to different
external systems (e.g., DB, file, Kafka) without having to implement
the gRPC server endpoints.
```go
// MyPlugin is the implementation of the ABCIListener interface.
// For Go plugins this is all that is required to process data sent over gRPC.
type MyPlugin struct {
	...
}

func (a MyPlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
	// process data
	return nil
}

func (a MyPlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
	// process data
	return nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: streamingabci.Handshake,
		Plugins: map[string]plugin.Plugin{
			"abci": &streamingabci.ListenerGRPCPlugin{Impl: &MyPlugin{}},
		},
		// A non-nil value here enables gRPC serving for this plugin.
		GRPCServer: plugin.DefaultGRPCServer,
	})
}
```
## Plugin Loading System
The SDK provides a general-purpose plugin loading system that can load not just
the `ABCIListener` service plugin but other protocol services as well. You can see
how plugins are loaded by the SDK in [store/streaming/streaming.go](https://github.com/cosmos/cosmos-sdk/blob/main/store/streaming/streaming.go).
You'll need to add the following to your `app.go`:
```go
// app.go
func NewApp(...) *App {
...
// register streaming services
streamingCfg := cast.ToStringMap(appOpts.Get(baseapp.StreamingTomlKey))
for service := range streamingCfg {
pluginKey := fmt.Sprintf("%s.%s.%s", baseapp.StreamingTomlKey, service, baseapp.StreamingABCIPluginTomlKey)
pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey)))
if len(pluginName) > 0 {
logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel))
plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel)
if err != nil {
tmos.Exit(err.Error())
}
if err := baseapp.RegisterStreamingPlugin(bApp, appOpts, keys, plugin); err != nil {
tmos.Exit(err.Error())
}
}
}
...
}
```
## Configuration
Update the streaming section in `app.toml`
```toml
# Streaming allows nodes to stream state to external systems
[streaming]
# streaming.abci specifies the configuration for the ABCI Listener streaming service
[streaming.abci]
# List of kv store keys to stream out via gRPC
# Set to ["*"] to expose all keys.
keys = ["*"]
# The plugin name used for streaming via gRPC
# Supported plugins: abci
plugin = "abci"
# stop-node-on-err specifies whether to stop the node on message delivery error
stop-node-on-err = true
```
## Updating the protocol
If you update the protocol buffers file, you can regenerate the file and plugins using the
following commands from the project root directory. You do not need to run this if you're
just trying the examples; you can skip ahead to the [Testing](#testing) section.
```shell
make proto-gen
```
* stdout plugin; from inside the `store/` dir, run:
```shell
go build -o streaming/abci/examples/stdout/stdout streaming/abci/examples/stdout/stdout.go
```
* file plugin (writes to `~/`); from inside the `store/` dir, run:
```shell
go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go
```
### Testing
Export a plugin from one of the Go or Python examples.
* stdout plugin
```shell
export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/stdout/stdout"
```
* file plugin (writes to ~/)
```shell
export COSMOS_SDK_ABCI="{path to}/cosmos-sdk/store/streaming/abci/examples/file/file"
```
where `{path to}` is the parent path to the `cosmos-sdk` repo on your system.
Test:
```shell
make test-sim-nondeterminism-streaming
```
The plugin system looks for the plugin binary in the env variable `COSMOS_SDK_{PLUGIN_NAME}`
shown above and errors out if it cannot find it. The uppercase `{PLUGIN_NAME}` is the value of the
`streaming.abci.plugin` TOML configuration setting.

View File

@ -0,0 +1,2 @@
# ignore the file plugin binary
file

View File

@ -0,0 +1,17 @@
# File Plugin
The file plugin is an example plugin written in Go. It is intended for local testing and should not be used in production environments.
## Build
To build the plugin run the following command:
```shell
cd store
```
```shell
go build -o streaming/abci/examples/file/file streaming/abci/examples/file/file.go
```
* The plugin will write files to the user's home directory `~/`.

View File

@ -0,0 +1,81 @@
package main
import (
"context"
"fmt"
"os"
"path/filepath"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/hashicorp/go-plugin"
streamingabci "cosmossdk.io/store/streaming/abci"
store "cosmossdk.io/store/types"
)
// FilePlugin is the implementation of the baseapp.ABCIListener interface
// For Go plugins this is all that is required to process data sent over gRPC.
type FilePlugin struct {
BlockHeight int64
}
func (a *FilePlugin) writeToFile(file string, data []byte) error {
home, err := os.UserHomeDir()
if err != nil {
return err
}
filename := fmt.Sprintf("%s/%s.txt", home, file)
f, err := os.OpenFile(filepath.Clean(filename), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600)
if err != nil {
return err
}
if _, err := f.Write(data); err != nil {
f.Close() // ignore error; Write error takes precedence
return err
}
if err := f.Close(); err != nil {
return err
}
return nil
}
func (a *FilePlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
a.BlockHeight = req.Height // track the current block height for logging
d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, req))
d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res))
if err := a.writeToFile("finalize-block-req", d1); err != nil {
return err
}
if err := a.writeToFile("finalize-block-res", d2); err != nil {
return err
}
return nil
}
func (a *FilePlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
fmt.Printf("listen-commit: block_height=%d data=%v", res.RetainHeight, changeSet)
d1 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, res))
d2 := []byte(fmt.Sprintf("%d:::%v\n", a.BlockHeight, changeSet))
if err := a.writeToFile("commit-res", d1); err != nil {
return err
}
if err := a.writeToFile("state-change", d2); err != nil {
return err
}
return nil
}
func main() {
plugin.Serve(&plugin.ServeConfig{
HandshakeConfig: streamingabci.Handshake,
Plugins: map[string]plugin.Plugin{
"abci": &streamingabci.ListenerGRPCPlugin{Impl: &FilePlugin{}},
},
// A non-nil value here enables gRPC serving for this plugin.
GRPCServer: plugin.DefaultGRPCServer,
})
}

Binary file not shown.

View File

@ -0,0 +1,43 @@
package main
import (
"context"
"fmt"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/hashicorp/go-plugin"
streamingabci "cosmossdk.io/store/streaming/abci"
store "cosmossdk.io/store/types"
)
// StdoutPlugin is the implementation of the ABCIListener interface
// For Go plugins this is all that is required to process data sent over gRPC.
type StdoutPlugin struct {
BlockHeight int64
}
func (a *StdoutPlugin) ListenFinalizeBlock(ctx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
a.BlockHeight = req.Height
// process tx messages (i.e: sent to external system)
fmt.Printf("listen-finalize-block: block-height=%d req=%v res=%v", a.BlockHeight, req, res)
return nil
}
func (a *StdoutPlugin) ListenCommit(ctx context.Context, res abci.ResponseCommit, changeSet []*store.StoreKVPair) error {
// process block commit messages (i.e: sent to external system)
fmt.Printf("listen-commit: block_height=%d res=%v data=%v", a.BlockHeight, res, changeSet)
return nil
}
func main() {
plugin.Serve(&plugin.ServeConfig{
HandshakeConfig: streamingabci.Handshake,
Plugins: map[string]plugin.Plugin{
"abci": &streamingabci.ListenerGRPCPlugin{Impl: &StdoutPlugin{}},
},
// A non-nil value here enables gRPC serving for this plugin.
GRPCServer: plugin.DefaultGRPCServer,
})
}

View File

@ -0,0 +1,79 @@
package abci
import (
"context"
"os"
abci "github.com/cometbft/cometbft/abci/types"
"github.com/hashicorp/go-plugin"
storetypes "cosmossdk.io/store/types"
)
var _ storetypes.ABCIListener = (*GRPCClient)(nil)
// GRPCClient is an implementation of the ABCIListener interface that talks over RPC.
type GRPCClient struct {
client ABCIListenerServiceClient
}
// ListenEndBlock listens to end block request and responses.
// In addition, it retrieves a types.Context from a context.Context instance.
// It panics if a types.Context was not properly attached.
// When the node is configured to stop on listening errors,
// it will terminate immediately and exit with a non-zero code.
func (m *GRPCClient) ListenFinalizeBlock(goCtx context.Context, req abci.RequestFinalizeBlock, res abci.ResponseFinalizeBlock) error {
ctx := goCtx.(storetypes.Context)
sm := ctx.StreamingManager()
request := &ListenFinalizeBlockRequest{Req: &req, Res: &res}
_, err := m.client.ListenFinalizeBlock(goCtx, request)
if err != nil && sm.StopNodeOnErr {
ctx.Logger().Error("FinalizeBlock listening hook failed", "height", ctx.BlockHeight(), "err", err)
cleanupAndExit()
}
return err
}
// ListenCommit listens to commit responses and state changes for the current block.
// In addition, it retrieves a types.Context from a context.Context instance.
// It panics if a types.Context was not properly attached.
// When the node is configured to stop on listening errors,
// it will terminate immediately and exit with a non-zero code.
func (m *GRPCClient) ListenCommit(goCtx context.Context, res abci.ResponseCommit, changeSet []*storetypes.StoreKVPair) error {
ctx := goCtx.(storetypes.Context)
sm := ctx.StreamingManager()
request := &ListenCommitRequest{BlockHeight: ctx.BlockHeight(), Res: &res, ChangeSet: changeSet}
_, err := m.client.ListenCommit(goCtx, request)
if err != nil && sm.StopNodeOnErr {
ctx.Logger().Error("Commit listening hook failed", "height", ctx.BlockHeight(), "err", err)
cleanupAndExit()
}
return err
}
func cleanupAndExit() {
plugin.CleanupClients()
os.Exit(1)
}
var _ ABCIListenerServiceServer = (*GRPCServer)(nil)
// GRPCServer is the gRPC server that GRPCClient talks to.
type GRPCServer struct {
// This is the real implementation
Impl storetypes.ABCIListener
}
func (m GRPCServer) ListenFinalizeBlock(ctx context.Context, request *ListenFinalizeBlockRequest) (*ListenFinalizeBlockResponse, error) {
if err := m.Impl.ListenFinalizeBlock(ctx, *request.Req, *request.Res); err != nil {
return nil, err
}
return &ListenFinalizeBlockResponse{}, nil
}
func (m GRPCServer) ListenCommit(ctx context.Context, request *ListenCommitRequest) (*ListenCommitResponse, error) {
if err := m.Impl.ListenCommit(ctx, *request.Res, request.ChangeSet); err != nil {
return nil, err
}
return &ListenCommitResponse{}, nil
}

File diff suppressed because it is too large

View File

@ -0,0 +1,45 @@
// Package abci contains shared data between the host and plugins.
package abci
import (
"context"
"github.com/hashicorp/go-plugin"
"google.golang.org/grpc"
storetypes "cosmossdk.io/store/types"
)
// Handshake is a common handshake that is shared by streaming and host.
// This prevents users from executing bad plugins or executing a plugin
// directory. It is a UX feature, not a security feature.
var Handshake = plugin.HandshakeConfig{
// This isn't required when using VersionedPlugins
ProtocolVersion: 1,
MagicCookieKey: "ABCI_LISTENER_PLUGIN",
MagicCookieValue: "ef78114d-7bdf-411c-868f-347c99a78345",
}
var _ plugin.GRPCPlugin = (*ListenerGRPCPlugin)(nil)
// ListenerGRPCPlugin is the implementation of plugin.GRPCPlugin, so we can serve/consume this.
type ListenerGRPCPlugin struct {
// GRPCPlugin must still implement the Plugin interface
plugin.Plugin
// Concrete implementation, written in Go. This is only used for plugins
// that are written in Go.
Impl storetypes.ABCIListener
}
func (p *ListenerGRPCPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error {
RegisterABCIListenerServiceServer(s, &GRPCServer{Impl: p.Impl})
return nil
}
func (p *ListenerGRPCPlugin) GRPCClient(
_ context.Context,
_ *plugin.GRPCBroker,
c *grpc.ClientConn,
) (interface{}, error) {
return &GRPCClient{client: NewABCIListenerServiceClient(c)}, nil
}

View File

@ -0,0 +1,79 @@
package streaming
import (
"fmt"
"os"
"os/exec"
"strings"
"github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-plugin"
streamingabci "cosmossdk.io/store/streaming/abci"
)
const pluginEnvKeyPrefix = "COSMOS_SDK"
// HandshakeMap contains a map of each supported streaming's handshake config
var HandshakeMap = map[string]plugin.HandshakeConfig{
"abci": streamingabci.Handshake,
}
// PluginMap contains a map of supported gRPC plugins
var PluginMap = map[string]plugin.Plugin{
"abci": &streamingabci.ListenerGRPCPlugin{},
}
func GetPluginEnvKey(name string) string {
return fmt.Sprintf("%s_%s", pluginEnvKeyPrefix, strings.ToUpper(name))
}
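// For example, GetPluginEnvKey("abci") returns "COSMOS_SDK_ABCI".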
func NewStreamingPlugin(name, logLevel string) (interface{}, error) {
logger := hclog.New(&hclog.LoggerOptions{
Output: hclog.DefaultOutput,
Level: toHclogLevel(logLevel),
Name: fmt.Sprintf("plugin.%s", name),
})
// We're a host. Start by launching the streaming process.
env := os.Getenv(GetPluginEnvKey(name))
client := plugin.NewClient(&plugin.ClientConfig{
HandshakeConfig: HandshakeMap[name],
Managed: true,
Plugins: PluginMap,
// For verifying the integrity of executables see SecureConfig documentation
// https://pkg.go.dev/github.com/hashicorp/go-plugin#SecureConfig
//#nosec G204 -- Required to load plugins
Cmd: exec.Command("sh", "-c", env),
Logger: logger,
AllowedProtocols: []plugin.Protocol{
plugin.ProtocolNetRPC, plugin.ProtocolGRPC,
},
})
// Connect via RPC
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
// Request streaming plugin
return rpcClient.Dispense(name)
}
func toHclogLevel(s string) hclog.Level {
switch s {
case "trace":
return hclog.Trace
case "debug":
return hclog.Debug
case "info":
return hclog.Info
case "warn":
return hclog.Warn
case "error":
return hclog.Error
default:
return hclog.DefaultLevel
}
}

View File

@ -0,0 +1,178 @@
package streaming
import (
"context"
"fmt"
"os"
"runtime"
"testing"
"time"
abci "github.com/cometbft/cometbft/abci/types"
tmproto "github.com/cometbft/cometbft/proto/tendermint/types"
"github.com/cosmos/gogoproto/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"cosmossdk.io/log"
storetypes "cosmossdk.io/store/types"
)
type PluginTestSuite struct {
suite.Suite
loggerCtx MockContext
workDir string
finalizeBlockReq abci.RequestFinalizeBlock
finalizeBlockRes abci.ResponseFinalizeBlock
commitRes abci.ResponseCommit
changeSet []*storetypes.StoreKVPair
}
func (s *PluginTestSuite) SetupTest() {
if runtime.GOOS != "linux" {
s.T().Skip("only run on linux")
}
path, err := os.Getwd()
if err != nil {
s.T().Fail()
}
s.workDir = path
pluginVersion := "abci"
// to write data to files, replace stdout/stdout => file/file
pluginPath := fmt.Sprintf("%s/abci/examples/stdout/stdout", s.workDir)
if err := os.Setenv(GetPluginEnvKey(pluginVersion), pluginPath); err != nil {
s.T().Fail()
}
raw, err := NewStreamingPlugin(pluginVersion, "trace")
require.NoError(s.T(), err, "load", "streaming", "unexpected error")
abciListener, ok := raw.(storetypes.ABCIListener)
require.True(s.T(), ok, "should pass type check")
header := tmproto.Header{Height: 1, Time: time.Now()}
logger := log.NewNopLogger()
streamingService := storetypes.StreamingManager{
ABCIListeners: []storetypes.ABCIListener{abciListener},
StopNodeOnErr: true,
}
s.loggerCtx = NewMockContext(header, logger, streamingService)
// test abci message types
s.finalizeBlockReq = abci.RequestFinalizeBlock{
Height: s.loggerCtx.BlockHeight(),
Txs: [][]byte{{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}},
Misbehavior: []abci.Misbehavior{},
Hash: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9},
DecidedLastCommit: abci.CommitInfo{},
}
s.finalizeBlockRes = abci.ResponseFinalizeBlock{
Events: []abci.Event{},
ConsensusParamUpdates: &tmproto.ConsensusParams{},
ValidatorUpdates: []abci.ValidatorUpdate{},
TxResults: []*abci.ExecTxResult{{
Events: []abci.Event{},
Code: 1,
Codespace: "mockCodeSpace",
Data: []byte{5, 6, 7, 8},
GasUsed: 2,
GasWanted: 3,
Info: "mockInfo",
Log: "mockLog",
}},
}
s.commitRes = abci.ResponseCommit{}
// test store kv pair types
for range [2000]int{} {
s.changeSet = append(s.changeSet, &storetypes.StoreKVPair{
StoreKey: "mockStore",
Delete: false,
Key: []byte{1, 2, 3},
Value: []byte{3, 2, 1},
})
}
}
func TestPluginTestSuite(t *testing.T) {
suite.Run(t, new(PluginTestSuite))
}
func (s *PluginTestSuite) TestABCIGRPCPlugin() {
s.T().Run("Should successfully load streaming", func(t *testing.T) {
abciListeners := s.loggerCtx.StreamingManager().ABCIListeners
for _, abciListener := range abciListeners {
for i := range [50]int{} {
err := abciListener.ListenFinalizeBlock(s.loggerCtx, s.finalizeBlockReq, s.finalizeBlockRes)
assert.NoError(t, err, "ListenFinalizeBlock")
err = abciListener.ListenCommit(s.loggerCtx, s.commitRes, s.changeSet)
assert.NoError(t, err, "ListenCommit")
s.updateHeight(int64(i + 1))
}
}
})
}
func (s *PluginTestSuite) updateHeight(n int64) {
header := s.loggerCtx.BlockHeader()
header.Height = n
s.loggerCtx = NewMockContext(header, s.loggerCtx.Logger(), s.loggerCtx.StreamingManager())
}
var (
_ context.Context = MockContext{}
_ storetypes.Context = MockContext{}
)
type MockContext struct {
baseCtx context.Context
header tmproto.Header
logger log.Logger
streamingManager storetypes.StreamingManager
}
func (m MockContext) BlockHeight() int64 { return m.header.Height }
func (m MockContext) Logger() log.Logger { return m.logger }
func (m MockContext) StreamingManager() storetypes.StreamingManager { return m.streamingManager }
func (m MockContext) BlockHeader() tmproto.Header {
msg := proto.Clone(&m.header).(*tmproto.Header)
return *msg
}
func NewMockContext(header tmproto.Header, logger log.Logger, sm storetypes.StreamingManager) MockContext {
header.Time = header.Time.UTC()
return MockContext{
baseCtx: context.Background(),
header: header,
logger: logger,
streamingManager: sm,
}
}
func (m MockContext) Deadline() (deadline time.Time, ok bool) {
return m.baseCtx.Deadline()
}
func (m MockContext) Done() <-chan struct{} {
return m.baseCtx.Done()
}
func (m MockContext) Err() error {
return m.baseCtx.Err()
}
func (m MockContext) Value(key any) any {
return m.baseCtx.Value(key)
}

store/tracekv/store.go Normal file
View File

@ -0,0 +1,202 @@
package tracekv
import (
"encoding/base64"
"encoding/json"
"io"
"cosmossdk.io/errors"
"cosmossdk.io/store/types"
)
const (
writeOp operation = "write"
readOp operation = "read"
deleteOp operation = "delete"
iterKeyOp operation = "iterKey"
iterValueOp operation = "iterValue"
)
type (
// Store implements the KVStore interface with tracing enabled.
// Operations are traced on each core KVStore call and written to the
// underlying io.Writer.
//
// TODO: Should we use a buffered writer and implement Commit on
// Store?
Store struct {
parent types.KVStore
writer io.Writer
context types.TraceContext
}
// operation represents an IO operation
operation string
// traceOperation implements a traced KVStore operation
traceOperation struct {
Operation operation `json:"operation"`
Key string `json:"key"`
Value string `json:"value"`
Metadata map[string]interface{} `json:"metadata"`
}
)
// NewStore returns a reference to a new traceKVStore given a parent
// KVStore implementation and a buffered writer.
func NewStore(parent types.KVStore, writer io.Writer, tc types.TraceContext) *Store {
return &Store{parent: parent, writer: writer, context: tc}
}
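// Example (illustrative): wrap an existing KVStore so that every traced
// operation is written as a JSON line to buf:
//
//	var buf bytes.Buffer
//	store := NewStore(parent, &buf, types.TraceContext{"blockHeight": 64})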
// Get implements the KVStore interface. It traces a read operation and
// delegates a Get call to the parent KVStore.
func (tkv *Store) Get(key []byte) []byte {
value := tkv.parent.Get(key)
writeOperation(tkv.writer, readOp, tkv.context, key, value)
return value
}
// Set implements the KVStore interface. It traces a write operation and
// delegates the Set call to the parent KVStore.
func (tkv *Store) Set(key, value []byte) {
types.AssertValidKey(key)
writeOperation(tkv.writer, writeOp, tkv.context, key, value)
tkv.parent.Set(key, value)
}
// Delete implements the KVStore interface. It traces a write operation and
// delegates the Delete call to the parent KVStore.
func (tkv *Store) Delete(key []byte) {
writeOperation(tkv.writer, deleteOp, tkv.context, key, nil)
tkv.parent.Delete(key)
}
// Has implements the KVStore interface. It delegates the Has call to the
// parent KVStore.
func (tkv *Store) Has(key []byte) bool {
return tkv.parent.Has(key)
}
// Iterator implements the KVStore interface. It delegates the Iterator call
// to the parent KVStore.
func (tkv *Store) Iterator(start, end []byte) types.Iterator {
return tkv.iterator(start, end, true)
}
// ReverseIterator implements the KVStore interface. It delegates the
// ReverseIterator call to the parent KVStore.
func (tkv *Store) ReverseIterator(start, end []byte) types.Iterator {
return tkv.iterator(start, end, false)
}
// iterator facilitates iteration over a KVStore. It delegates the necessary
// calls to its parent KVStore.
func (tkv *Store) iterator(start, end []byte, ascending bool) types.Iterator {
var parent types.Iterator
if ascending {
parent = tkv.parent.Iterator(start, end)
} else {
parent = tkv.parent.ReverseIterator(start, end)
}
return newTraceIterator(tkv.writer, parent, tkv.context)
}
type traceIterator struct {
parent types.Iterator
writer io.Writer
context types.TraceContext
}
func newTraceIterator(w io.Writer, parent types.Iterator, tc types.TraceContext) types.Iterator {
return &traceIterator{writer: w, parent: parent, context: tc}
}
// Domain implements the Iterator interface.
func (ti *traceIterator) Domain() (start, end []byte) {
return ti.parent.Domain()
}
// Valid implements the Iterator interface.
func (ti *traceIterator) Valid() bool {
return ti.parent.Valid()
}
// Next implements the Iterator interface.
func (ti *traceIterator) Next() {
ti.parent.Next()
}
// Key implements the Iterator interface.
func (ti *traceIterator) Key() []byte {
key := ti.parent.Key()
writeOperation(ti.writer, iterKeyOp, ti.context, key, nil)
return key
}
// Value implements the Iterator interface.
func (ti *traceIterator) Value() []byte {
value := ti.parent.Value()
writeOperation(ti.writer, iterValueOp, ti.context, nil, value)
return value
}
// Close implements the Iterator interface.
func (ti *traceIterator) Close() error {
return ti.parent.Close()
}
// Error delegates the Error call to the parent iterator.
func (ti *traceIterator) Error() error {
return ti.parent.Error()
}
// GetStoreType implements the KVStore interface. It returns the underlying
// KVStore type.
func (tkv *Store) GetStoreType() types.StoreType {
return tkv.parent.GetStoreType()
}
// CacheWrap implements the KVStore interface. It panics because a Store
// cannot be branched.
func (tkv *Store) CacheWrap() types.CacheWrap {
panic("cannot CacheWrap a TraceKVStore")
}
// CacheWrapWithTrace implements the KVStore interface. It panics as a
// Store cannot be branched.
func (tkv *Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap {
panic("cannot CacheWrapWithTrace a TraceKVStore")
}
// writeOperation writes a KVStore operation to the underlying io.Writer as
// JSON-encoded data where the key/value pair is base64 encoded.
func writeOperation(w io.Writer, op operation, tc types.TraceContext, key, value []byte) {
traceOp := traceOperation{
Operation: op,
Key: base64.StdEncoding.EncodeToString(key),
Value: base64.StdEncoding.EncodeToString(value),
}
if tc != nil {
traceOp.Metadata = tc
}
raw, err := json.Marshal(traceOp)
if err != nil {
panic(errors.Wrap(err, "failed to serialize trace operation"))
}
if _, err := w.Write(raw); err != nil {
panic(errors.Wrap(err, "failed to write trace operation"))
}
_, err = io.WriteString(w, "\n")
if err != nil {
panic(errors.Wrap(err, "failed to write newline"))
}
}
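// A trace entry written by writeOperation looks like this (key and value are
// base64-encoded):
//
//	{"operation":"write","key":"a2V5MDAwMDAwMDE=","value":"dmFsdWUwMDAwMDAwMQ==","metadata":{"blockHeight":64}}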

store/tracekv/store_test.go Normal file
View File

@ -0,0 +1,292 @@
package tracekv_test
import (
"bytes"
"fmt"
"io"
"testing"
dbm "github.com/cosmos/cosmos-db"
"github.com/stretchr/testify/require"
"cosmossdk.io/store/dbadapter"
"cosmossdk.io/store/internal/kv"
"cosmossdk.io/store/prefix"
"cosmossdk.io/store/tracekv"
"cosmossdk.io/store/types"
)
func bz(s string) []byte { return []byte(s) }
func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) }
func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) }
var kvPairs = []kv.Pair{
{Key: keyFmt(1), Value: valFmt(1)},
{Key: keyFmt(2), Value: valFmt(2)},
{Key: keyFmt(3), Value: valFmt(3)},
}
func newTraceKVStore(w io.Writer) *tracekv.Store {
store := newEmptyTraceKVStore(w)
for _, kvPair := range kvPairs {
store.Set(kvPair.Key, kvPair.Value)
}
return store
}
func newEmptyTraceKVStore(w io.Writer) *tracekv.Store {
memDB := dbadapter.Store{DB: dbm.NewMemDB()}
tc := types.TraceContext(map[string]interface{}{"blockHeight": 64})
return tracekv.NewStore(memDB, w, tc)
}
func TestTraceKVStoreGet(t *testing.T) {
testCases := []struct {
key []byte
expectedValue []byte
expectedOut string
}{
{
key: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedOut: "{\"operation\":\"read\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: []byte("does-not-exist"),
expectedValue: nil,
expectedOut: "{\"operation\":\"read\",\"key\":\"ZG9lcy1ub3QtZXhpc3Q=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
value := store.Get(tc.key)
require.Equal(t, tc.expectedValue, value)
require.Equal(t, tc.expectedOut, buf.String())
}
}
func TestTraceKVStoreSet(t *testing.T) {
testCases := []struct {
key []byte
value []byte
expectedOut string
}{
{
key: kvPairs[0].Key,
value: kvPairs[0].Value,
expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: kvPairs[1].Key,
value: kvPairs[1].Value,
expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
key: kvPairs[2].Key,
value: kvPairs[2].Value,
expectedOut: "{\"operation\":\"write\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newEmptyTraceKVStore(&buf)
buf.Reset()
store.Set(tc.key, tc.value)
require.Equal(t, tc.expectedOut, buf.String())
}
var buf bytes.Buffer
store := newEmptyTraceKVStore(&buf)
require.Panics(t, func() { store.Set([]byte(""), []byte("value")) }, "setting an empty key should panic")
require.Panics(t, func() { store.Set(nil, []byte("value")) }, "setting a nil key should panic")
}
func TestTraceKVStoreDelete(t *testing.T) {
testCases := []struct {
key []byte
expectedOut string
}{
{
key: kvPairs[0].Key,
expectedOut: "{\"operation\":\"delete\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
store.Delete(tc.key)
require.Equal(t, tc.expectedOut, buf.String())
}
}
func TestTraceKVStoreHas(t *testing.T) {
testCases := []struct {
key []byte
expected bool
}{
{
key: kvPairs[0].Key,
expected: true,
},
}
for _, tc := range testCases {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
buf.Reset()
ok := store.Has(tc.key)
require.Equal(t, tc.expected, ok)
}
}
func TestTestTraceKVStoreIterator(t *testing.T) {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
iterator := store.Iterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []byte(nil), s)
require.Equal(t, []byte(nil), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
expectedKeyOut string
expectedvalueOut string
}{
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
buf.Reset()
ka := iterator.Key()
require.Equal(t, tc.expectedKeyOut, buf.String())
buf.Reset()
va := iterator.Value()
require.Equal(t, tc.expectedValueOut, buf.String())
require.Equal(t, tc.expectedKey, ka)
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NoError(t, iterator.Close())
}
func TestTraceKVStoreReverseIterator(t *testing.T) {
var buf bytes.Buffer
store := newTraceKVStore(&buf)
iterator := store.ReverseIterator(nil, nil)
s, e := iterator.Domain()
require.Equal(t, []byte(nil), s)
require.Equal(t, []byte(nil), e)
testCases := []struct {
expectedKey []byte
expectedValue []byte
expectedKeyOut string
expectedValueOut string
}{
{
expectedKey: kvPairs[2].Key,
expectedValue: kvPairs[2].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDM=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMw==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[1].Key,
expectedValue: kvPairs[1].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDI=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMg==\",\"metadata\":{\"blockHeight\":64}}\n",
},
{
expectedKey: kvPairs[0].Key,
expectedValue: kvPairs[0].Value,
expectedKeyOut: "{\"operation\":\"iterKey\",\"key\":\"a2V5MDAwMDAwMDE=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n",
expectedvalueOut: "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dmFsdWUwMDAwMDAwMQ==\",\"metadata\":{\"blockHeight\":64}}\n",
},
}
for _, tc := range testCases {
buf.Reset()
ka := iterator.Key()
require.Equal(t, tc.expectedKeyOut, buf.String())
buf.Reset()
va := iterator.Value()
require.Equal(t, tc.expectedValueOut, buf.String())
require.Equal(t, tc.expectedKey, ka)
require.Equal(t, tc.expectedValue, va)
iterator.Next()
}
require.False(t, iterator.Valid())
require.Panics(t, iterator.Next)
require.NoError(t, iterator.Close())
}
func TestTraceKVStorePrefix(t *testing.T) {
store := newEmptyTraceKVStore(nil)
pStore := prefix.NewStore(store, []byte("trace_prefix"))
require.IsType(t, prefix.Store{}, pStore)
}
func TestTraceKVStoreGetStoreType(t *testing.T) {
memDB := dbadapter.Store{DB: dbm.NewMemDB()}
store := newEmptyTraceKVStore(nil)
require.Equal(t, memDB.GetStoreType(), store.GetStoreType())
}
func TestTraceKVStoreCacheWrap(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrap() })
}
func TestTraceKVStoreCacheWrapWithTrace(t *testing.T) {
store := newEmptyTraceKVStore(nil)
require.Panics(t, func() { store.CacheWrapWithTrace(nil, nil) })
}

store/transient/store.go Normal file

@ -0,0 +1,53 @@
package transient
import (
dbm "github.com/cosmos/cosmos-db"
"cosmossdk.io/store/dbadapter"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/types"
)
var (
_ types.Committer = (*Store)(nil)
_ types.KVStore = (*Store)(nil)
)
// Store is a wrapper for a MemDB with Committer implementation.
type Store struct {
dbadapter.Store
}
// NewStore constructs a new MemDB adapter.
func NewStore() *Store {
return &Store{Store: dbadapter.Store{DB: dbm.NewMemDB()}}
}
// Commit implements CommitStore. It cleans up the Store by swapping in a
// fresh MemDB, discarding everything written since the last commit.
func (ts *Store) Commit() (id types.CommitID) {
ts.Store = dbadapter.Store{DB: dbm.NewMemDB()}
return
}
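// SetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.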
func (ts *Store) SetPruning(_ pruningtypes.PruningOptions) {}
// GetPruning is a no-op as pruning options cannot be directly set on this store.
// They must be set on the root commit multi-store.
func (ts *Store) GetPruning() pruningtypes.PruningOptions {
return pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined)
}
// LastCommitID implements CommitStore.
func (ts *Store) LastCommitID() types.CommitID {
return types.CommitID{}
}
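// WorkingHash returns an empty hash because the transient store holds no
// persistent state.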
func (ts *Store) WorkingHash() []byte {
return []byte{}
}
// GetStoreType implements Store.
func (ts *Store) GetStoreType() types.StoreType {
return types.StoreTypeTransient
}


@ -0,0 +1,34 @@
package transient_test
import (
"bytes"
"testing"
"github.com/stretchr/testify/require"
pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/transient"
)
var k, v = []byte("hello"), []byte("world")
func TestTransientStore(t *testing.T) {
tstore := transient.NewStore()
require.Nil(t, tstore.Get(k))
tstore.Set(k, v)
require.Equal(t, v, tstore.Get(k))
tstore.Commit()
require.Nil(t, tstore.Get(k))
// no-op
tstore.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningUndefined))
emptyCommitID := tstore.LastCommitID()
require.Equal(t, emptyCommitID.Version, int64(0))
require.True(t, bytes.Equal(emptyCommitID.Hash, nil))
}

store/types/codec.go Normal file

@ -0,0 +1,89 @@
package types
import (
"encoding/binary"
fmt "fmt"
proto "github.com/cosmos/gogoproto/proto"
)
// Codec defines an interface needed for the store package to marshal data
type Codec interface {
// Marshal returns binary encoding of v.
Marshal(proto.Message) ([]byte, error)
// MarshalLengthPrefixed returns the binary encoding of v, prefixed with its length in bytes.
MarshalLengthPrefixed(proto.Message) ([]byte, error)
// Unmarshal parses the data encoded with Marshal method and stores the result
// in the value pointed to by v.
Unmarshal(bz []byte, ptr proto.Message) error
// UnmarshalLengthPrefixed parses the data encoded with MarshalLengthPrefixed method
// and stores the result in the value pointed to by v.
UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error
}
// ============= TestCodec =============
// TestCodec defines a codec that utilizes Protobuf for both binary and JSON
// encoding.
type TestCodec struct{}
var _ Codec = &TestCodec{}
func NewTestCodec() Codec {
return &TestCodec{}
}
// Marshal implements BinaryMarshaler.Marshal method.
// NOTE: this function must be used with a concrete type which
// implements proto.Message. For interfaces, use codec.MarshalInterface instead.
func (pc *TestCodec) Marshal(o proto.Message) ([]byte, error) {
// Size() check can catch the typed nil value.
if o == nil || proto.Size(o) == 0 {
// return empty bytes instead of nil, because nil has special meaning in places like store.Set
return []byte{}, nil
}
return proto.Marshal(o)
}
// MarshalLengthPrefixed implements BinaryMarshaler.MarshalLengthPrefixed method.
func (pc *TestCodec) MarshalLengthPrefixed(o proto.Message) ([]byte, error) {
bz, err := pc.Marshal(o)
if err != nil {
return nil, err
}
var sizeBuf [binary.MaxVarintLen64]byte
n := binary.PutUvarint(sizeBuf[:], uint64(len(bz)))
return append(sizeBuf[:n], bz...), nil
}
// Unmarshal implements BinaryMarshaler.Unmarshal method.
// NOTE: this function must be used with a concrete type which
// implements proto.Message. For interfaces, use codec.UnmarshalInterface instead.
func (pc *TestCodec) Unmarshal(bz []byte, ptr proto.Message) error {
return proto.Unmarshal(bz, ptr)
}
// UnmarshalLengthPrefixed implements BinaryMarshaler.UnmarshalLengthPrefixed method.
func (pc *TestCodec) UnmarshalLengthPrefixed(bz []byte, ptr proto.Message) error {
size, n := binary.Uvarint(bz)
if n < 0 {
return fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", n)
}
if size > uint64(len(bz)-n) {
return fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-n)
} else if size < uint64(len(bz)-n) {
return fmt.Errorf("too many bytes to read; want: %v, got: %v", size, len(bz)-n)
}
bz = bz[n:]
return proto.Unmarshal(bz, ptr)
}
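
As a hedged illustration of how the two length-prefixed methods round-trip (the standalone main layout, values, and printed output are illustrative; CommitID is the message defined later in this package):

package main

import (
	"fmt"

	"cosmossdk.io/store/types"
)

func main() {
	cdc := types.NewTestCodec()
	id := &types.CommitID{Version: 7, Hash: []byte{0x01, 0x02}}

	// MarshalLengthPrefixed prepends a uvarint byte length, so a reader can
	// consume several messages concatenated in one buffer.
	bz, err := cdc.MarshalLengthPrefixed(id)
	if err != nil {
		panic(err)
	}

	var out types.CommitID
	// UnmarshalLengthPrefixed checks that exactly size bytes follow the
	// prefix before delegating to proto.Unmarshal.
	if err := cdc.UnmarshalLengthPrefixed(bz, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Version, out.Hash) // 7 [1 2]
}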


@ -0,0 +1,62 @@
package types
import (
"crypto/sha256"
cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
"cosmossdk.io/store/internal/maps"
)
// GetHash returns the hash of the store's CommitID.
// This is used in CommitInfo.Hash()
//
// When we commit to this in a merkle proof, we create a map of storeInfo.Name -> storeInfo.GetHash()
// and build a merkle proof from that.
// This is then chained with the substore proof, so we prove the root hash from the substore before this
// and need to pass that (unmodified) as the leaf value of the multistore proof.
func (si StoreInfo) GetHash() []byte {
return si.CommitId.Hash
}
func (ci CommitInfo) toMap() map[string][]byte {
m := make(map[string][]byte, len(ci.StoreInfos))
for _, storeInfo := range ci.StoreInfos {
m[storeInfo.Name] = storeInfo.GetHash()
}
return m
}
// Hash returns the simple merkle root hash of the stores sorted by name.
func (ci CommitInfo) Hash() []byte {
// we need a special case for the empty set, as ProofsFromMap requires at least one entry
if len(ci.StoreInfos) == 0 {
emptyHash := sha256.Sum256([]byte{})
return emptyHash[:]
}
rootHash, _, _ := maps.ProofsFromMap(ci.toMap())
if len(rootHash) == 0 {
emptyHash := sha256.Sum256([]byte{})
return emptyHash[:]
}
return rootHash
}
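// ProofOp returns a merkle ProofOp that proves the named store's hash against
// this CommitInfo's map of store hashes, panicking if it cannot be constructed.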
func (ci CommitInfo) ProofOp(storeName string) cmtprotocrypto.ProofOp {
ret, err := ProofOpFromMap(ci.toMap(), storeName)
if err != nil {
panic(err)
}
return ret
}
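// CommitID returns the CommitID for this CommitInfo: its version paired with
// its root hash.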
func (ci CommitInfo) CommitID() CommitID {
return CommitID{
Version: ci.Version,
Hash: ci.Hash(),
}
}
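
A short, hypothetical sketch of how a multi-store app hash is derived from per-store commit hashes (store names and hash bytes are made up):

package main

import (
	"fmt"

	"cosmossdk.io/store/types"
)

func main() {
	ci := types.CommitInfo{
		Version: 10,
		StoreInfos: []types.StoreInfo{
			{Name: "bank", CommitId: types.CommitID{Version: 10, Hash: []byte("bank-root")}},
			{Name: "staking", CommitId: types.CommitID{Version: 10, Hash: []byte("staking-root")}},
		},
	}

	// Hash builds a simple merkle tree over the name -> hash map, so the
	// result does not depend on the order in which StoreInfos were appended.
	fmt.Printf("app hash: %X\n", ci.Hash())

	// CommitID pairs that root hash with the committed version.
	fmt.Println(ci.CommitID().Version) // 10
}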


@ -0,0 +1,864 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: cosmos/store/v1beta1/commit_info.proto
package types
import (
fmt "fmt"
_ "github.com/cosmos/gogoproto/gogoproto"
proto "github.com/cosmos/gogoproto/proto"
github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types"
_ "google.golang.org/protobuf/types/known/timestamppb"
io "io"
math "math"
math_bits "math/bits"
time "time"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// CommitInfo defines commit information used by the multi-store when committing
// a version/height.
type CommitInfo struct {
Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
StoreInfos []StoreInfo `protobuf:"bytes,2,rep,name=store_infos,json=storeInfos,proto3" json:"store_infos"`
Timestamp time.Time `protobuf:"bytes,3,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
}
func (m *CommitInfo) Reset() { *m = CommitInfo{} }
func (m *CommitInfo) String() string { return proto.CompactTextString(m) }
func (*CommitInfo) ProtoMessage() {}
func (*CommitInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_5f8c656cdef8c524, []int{0}
}
func (m *CommitInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CommitInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_CommitInfo.Merge(m, src)
}
func (m *CommitInfo) XXX_Size() int {
return m.Size()
}
func (m *CommitInfo) XXX_DiscardUnknown() {
xxx_messageInfo_CommitInfo.DiscardUnknown(m)
}
var xxx_messageInfo_CommitInfo proto.InternalMessageInfo
func (m *CommitInfo) GetVersion() int64 {
if m != nil {
return m.Version
}
return 0
}
func (m *CommitInfo) GetStoreInfos() []StoreInfo {
if m != nil {
return m.StoreInfos
}
return nil
}
func (m *CommitInfo) GetTimestamp() time.Time {
if m != nil {
return m.Timestamp
}
return time.Time{}
}
// StoreInfo defines store-specific commit information. It contains a reference
// between a store name and the commit ID.
type StoreInfo struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
CommitId CommitID `protobuf:"bytes,2,opt,name=commit_id,json=commitId,proto3" json:"commit_id"`
}
func (m *StoreInfo) Reset() { *m = StoreInfo{} }
func (m *StoreInfo) String() string { return proto.CompactTextString(m) }
func (*StoreInfo) ProtoMessage() {}
func (*StoreInfo) Descriptor() ([]byte, []int) {
return fileDescriptor_5f8c656cdef8c524, []int{1}
}
func (m *StoreInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *StoreInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_StoreInfo.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *StoreInfo) XXX_Merge(src proto.Message) {
xxx_messageInfo_StoreInfo.Merge(m, src)
}
func (m *StoreInfo) XXX_Size() int {
return m.Size()
}
func (m *StoreInfo) XXX_DiscardUnknown() {
xxx_messageInfo_StoreInfo.DiscardUnknown(m)
}
var xxx_messageInfo_StoreInfo proto.InternalMessageInfo
func (m *StoreInfo) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *StoreInfo) GetCommitId() CommitID {
if m != nil {
return m.CommitId
}
return CommitID{}
}
// CommitID defines the commitment information when a specific store is
// committed.
type CommitID struct {
Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
}
func (m *CommitID) Reset() { *m = CommitID{} }
func (*CommitID) ProtoMessage() {}
func (*CommitID) Descriptor() ([]byte, []int) {
return fileDescriptor_5f8c656cdef8c524, []int{2}
}
func (m *CommitID) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *CommitID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_CommitID.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *CommitID) XXX_Merge(src proto.Message) {
xxx_messageInfo_CommitID.Merge(m, src)
}
func (m *CommitID) XXX_Size() int {
return m.Size()
}
func (m *CommitID) XXX_DiscardUnknown() {
xxx_messageInfo_CommitID.DiscardUnknown(m)
}
var xxx_messageInfo_CommitID proto.InternalMessageInfo
func (m *CommitID) GetVersion() int64 {
if m != nil {
return m.Version
}
return 0
}
func (m *CommitID) GetHash() []byte {
if m != nil {
return m.Hash
}
return nil
}
func init() {
proto.RegisterType((*CommitInfo)(nil), "cosmos.store.v1beta1.CommitInfo")
proto.RegisterType((*StoreInfo)(nil), "cosmos.store.v1beta1.StoreInfo")
proto.RegisterType((*CommitID)(nil), "cosmos.store.v1beta1.CommitID")
}
func init() {
proto.RegisterFile("cosmos/store/v1beta1/commit_info.proto", fileDescriptor_5f8c656cdef8c524)
}
var fileDescriptor_5f8c656cdef8c524 = []byte{
// 336 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xf2, 0x50,
0x14, 0xc7, 0x7b, 0xa1, 0xf9, 0x3e, 0x7a, 0x70, 0xba, 0x61, 0x68, 0x18, 0x6e, 0x09, 0x83, 0x61,
0xba, 0x0d, 0xb8, 0x39, 0x98, 0x58, 0x8d, 0x09, 0x6b, 0x75, 0x72, 0x31, 0x2d, 0x5c, 0x4a, 0xa3,
0xed, 0x21, 0xdc, 0x2b, 0x89, 0x6f, 0xc1, 0xe8, 0xe8, 0x33, 0xf8, 0x14, 0x8c, 0x8c, 0x4e, 0x6a,
0xe0, 0x45, 0x4c, 0x4f, 0x5b, 0x5c, 0x88, 0xdb, 0x39, 0xed, 0xef, 0x9c, 0xff, 0xaf, 0xa7, 0x70,
0x3a, 0x41, 0x9d, 0xa1, 0xf6, 0xb5, 0xc1, 0xa5, 0xf2, 0x57, 0xc3, 0x58, 0x99, 0x68, 0xe8, 0x4f,
0x30, 0xcb, 0x52, 0xf3, 0x90, 0xe6, 0x33, 0x94, 0x8b, 0x25, 0x1a, 0xe4, 0x9d, 0x92, 0x93, 0xc4,
0xc9, 0x8a, 0xeb, 0x76, 0x12, 0x4c, 0x90, 0x00, 0xbf, 0xa8, 0x4a, 0xb6, 0xeb, 0x25, 0x88, 0xc9,
0x93, 0xf2, 0xa9, 0x8b, 0x9f, 0x67, 0xbe, 0x49, 0x33, 0xa5, 0x4d, 0x94, 0x2d, 0x4a, 0xa0, 0xff,
0xce, 0x00, 0xae, 0x28, 0x62, 0x9c, 0xcf, 0x90, 0xbb, 0xf0, 0x7f, 0xa5, 0x96, 0x3a, 0xc5, 0xdc,
0x65, 0x3d, 0x36, 0x68, 0x86, 0x75, 0xcb, 0x6f, 0xa0, 0x4d, 0x81, 0x64, 0xa2, 0xdd, 0x46, 0xaf,
0x39, 0x68, 0x8f, 0x3c, 0x79, 0xcc, 0x45, 0xde, 0x16, 0x5d, 0xb1, 0x2f, 0xb0, 0x37, 0x9f, 0x9e,
0x15, 0x82, 0xae, 0x1f, 0x68, 0x1e, 0x80, 0x73, 0x70, 0x70, 0x9b, 0x3d, 0x36, 0x68, 0x8f, 0xba,
0xb2, 0xb4, 0x94, 0xb5, 0xa5, 0xbc, 0xab, 0x89, 0xa0, 0x55, 0x2c, 0x58, 0x7f, 0x79, 0x2c, 0xfc,
0x1d, 0xeb, 0xc7, 0xe0, 0x1c, 0x22, 0x38, 0x07, 0x3b, 0x8f, 0x32, 0x45, 0xbe, 0x4e, 0x48, 0x35,
0xbf, 0x04, 0xa7, 0xbe, 0xdb, 0xd4, 0x6d, 0x50, 0x88, 0x38, 0xae, 0x5a, 0x7d, 0xfb, 0x75, 0x65,
0xda, 0x2a, 0xc7, 0xc6, 0xd3, 0xfe, 0x05, 0xb4, 0xea, 0x77, 0x7f, 0x5c, 0x85, 0x83, 0x3d, 0x8f,
0xf4, 0x9c, 0x32, 0x4e, 0x42, 0xaa, 0xcf, 0xed, 0xd7, 0x37, 0xcf, 0x0a, 0x46, 0x9b, 0x9d, 0x60,
0xdb, 0x9d, 0x60, 0xdf, 0x3b, 0xc1, 0xd6, 0x7b, 0x61, 0x6d, 0xf7, 0xc2, 0xfa, 0xd8, 0x0b, 0xeb,
0xde, 0x2d, 0x45, 0xf4, 0xf4, 0x51, 0xa6, 0x58, 0xfd, 0x6d, 0xf3, 0xb2, 0x50, 0x3a, 0xfe, 0x47,
0x07, 0x38, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0x67, 0xb7, 0x0d, 0x59, 0x0a, 0x02, 0x00, 0x00,
}
func (m *CommitInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp):])
if err1 != nil {
return 0, err1
}
i -= n1
i = encodeVarintCommitInfo(dAtA, i, uint64(n1))
i--
dAtA[i] = 0x1a
if len(m.StoreInfos) > 0 {
for iNdEx := len(m.StoreInfos) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.StoreInfos[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommitInfo(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if m.Version != 0 {
i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func (m *StoreInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StoreInfo) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *StoreInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
{
size, err := m.CommitId.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintCommitInfo(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *CommitID) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CommitID) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *CommitID) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Hash) > 0 {
i -= len(m.Hash)
copy(dAtA[i:], m.Hash)
i = encodeVarintCommitInfo(dAtA, i, uint64(len(m.Hash)))
i--
dAtA[i] = 0x12
}
if m.Version != 0 {
i = encodeVarintCommitInfo(dAtA, i, uint64(m.Version))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintCommitInfo(dAtA []byte, offset int, v uint64) int {
offset -= sovCommitInfo(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *CommitInfo) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Version != 0 {
n += 1 + sovCommitInfo(uint64(m.Version))
}
if len(m.StoreInfos) > 0 {
for _, e := range m.StoreInfos {
l = e.Size()
n += 1 + l + sovCommitInfo(uint64(l))
}
}
l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.Timestamp)
n += 1 + l + sovCommitInfo(uint64(l))
return n
}
func (m *StoreInfo) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovCommitInfo(uint64(l))
}
l = m.CommitId.Size()
n += 1 + l + sovCommitInfo(uint64(l))
return n
}
func (m *CommitID) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Version != 0 {
n += 1 + sovCommitInfo(uint64(m.Version))
}
l = len(m.Hash)
if l > 0 {
n += 1 + l + sovCommitInfo(uint64(l))
}
return n
}
func sovCommitInfo(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozCommitInfo(x uint64) (n int) {
return sovCommitInfo(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *CommitInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
m.Version = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Version |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field StoreInfos", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommitInfo
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommitInfo
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.StoreInfos = append(m.StoreInfos, StoreInfo{})
if err := m.StoreInfos[len(m.StoreInfos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommitInfo
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommitInfo
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommitInfo(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommitInfo
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *StoreInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StoreInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StoreInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthCommitInfo
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthCommitInfo
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field CommitId", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthCommitInfo
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthCommitInfo
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.CommitId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommitInfo(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommitInfo
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CommitID) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CommitID: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CommitID: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
m.Version = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Version |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthCommitInfo
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthCommitInfo
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...)
if m.Hash == nil {
m.Hash = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCommitInfo(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthCommitInfo
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipCommitInfo(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowCommitInfo
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthCommitInfo
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupCommitInfo
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthCommitInfo
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthCommitInfo = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowCommitInfo = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupCommitInfo = fmt.Errorf("proto: unexpected end of group")
)

store/types/context.go Normal file

@ -0,0 +1,13 @@
package types
import (
"cosmossdk.io/log"
)
// Context is an interface used by an App to pass context information
// needed to process store streaming requests.
type Context interface {
BlockHeight() int64
Logger() log.Logger
StreamingManager() StreamingManager
}
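
A hedged sketch of a test double satisfying this interface; the stub name and fields are invented here, and only the three methods above are implemented:

package types_test

import (
	"cosmossdk.io/log"

	"cosmossdk.io/store/types"
)

// stubContext is a hypothetical fixture for code that only needs the block
// height, a logger, and the streaming manager.
type stubContext struct {
	height int64
	logger log.Logger
	sm     types.StreamingManager
}

func (c stubContext) BlockHeight() int64                       { return c.height }
func (c stubContext) Logger() log.Logger                       { return c.logger }
func (c stubContext) StreamingManager() types.StreamingManager { return c.sm }

// Compile-time assertion that the stub satisfies types.Context.
var _ types.Context = stubContext{}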

store/types/errors.go Normal file

@ -0,0 +1,28 @@
package types
import (
"cosmossdk.io/errors"
)
const StoreCodespace = "store"
var (
// ErrInvalidProof is returned when a proof is invalid
ErrInvalidProof = errors.Register(StoreCodespace, 2, "invalid proof")
// ErrTxDecode is returned if we cannot parse a transaction
ErrTxDecode = errors.Register(StoreCodespace, 3, "tx parse error")
// ErrUnknownRequest is returned when an unknown request is received
ErrUnknownRequest = errors.Register(StoreCodespace, 4, "unknown request")
// ErrLogic defines an internal logic error, e.g. an invariant or assertion
// that is violated. It is a programmer error, not a user-facing error.
ErrLogic = errors.Register(StoreCodespace, 5, "internal logic error")
// ErrConflict defines a conflict error, e.g. when two goroutines try to access
// the same resource and one of them fails.
ErrConflict = errors.Register(StoreCodespace, 6, "conflict")
// ErrInvalidRequest defines an ABCI typed error where the request contains
// invalid data.
ErrInvalidRequest = errors.Register(StoreCodespace, 7, "invalid request")
)
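
A hedged sketch of how these registered sentinels are typically consumed via cosmossdk.io/errors; verifyCommitProof and its arguments are invented for illustration:

package store

import (
	"cosmossdk.io/errors"

	storetypes "cosmossdk.io/store/types"
)

// verifyCommitProof wraps the registered sentinel with call-site detail while
// keeping errors.Is(err, storetypes.ErrInvalidProof) true on the result.
func verifyCommitProof(ok bool, storeName string) error {
	if !ok {
		return errors.Wrapf(storetypes.ErrInvalidProof, "store %q: proof does not match app hash", storeName)
	}
	return nil
}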
