chore: Db linting (#12141)

* mainly sdk.int to cosmossdk.io/math

* staking keys

* fumpt

* var-naming linter errors and a fumpt

* Update CHANGELOG.md

* Update .golangci.yml

* Update CHANGELOG.md

* Update test_helpers.go

* Update test_helpers.go

* fumpt and lint

* this lints the db module, and makes it easier to use.  It adds breaking name changes

* DBConnection -> Connection

* previous commit contained a merge error

* Update test_helpers.go

* Update test_helpers.go

* db renamings

* merge master

* changelog

* DBWriter -> Writer

* consistent multistore receiver

* standard receivers for multistore v2alpha1

* general cleanup of linting issues

* more linter fixes

* remove prealloc linter

* nolint the secp256k1 import

* nolint the secp256k1 package

* complete nolint resulting in a diff that has only nolints
This commit is contained in:
Jacob Gadikian 2022-06-09 00:02:01 +07:00 committed by GitHub
parent 9e6607172b
commit b0e82f9640
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
75 changed files with 440 additions and 477 deletions

View File

@ -22,7 +22,6 @@ linters:
- misspell
- nakedret
- nolintlint
- prealloc
- revive
- staticcheck
- structcheck

View File

@ -49,10 +49,12 @@ Ref: https://keepachangelog.com/en/1.0.0/
### API Breaking Changes
* (x/staking) [#12102](https://github.com/cosmos/cosmos-sdk/pull/12102) Staking keeper now is passed by reference instead of copy. Keeper's SetHooks no longer returns keeper. It updates the keeper in place instead.
* (linting) [#12141](https://github.com/cosmos/cosmos-sdk/pull/12141) Fix usability related linting for database. This means removing the infix Prefix from `prefix.NewPrefixWriter` and such so that it is `prefix.NewWriter` and making `db.DBConnection` and such into `db.Connection`
### Bug Fixes
* (linting) [#12135](https://github.com/cosmos/cosmos-sdk/pull/12135/) Fix variable naming issues per enabled linters. Run gofumpt to ensure easy reviews of ongoing linting work.
* (linting) [#12135](https://github.com/cosmos/cosmos-sdk/pull/12135) Fix variable naming issues per enabled linters. Run gofumpt to ensure easy reviews of ongoing linting work.
* (linting) [#12132](https://github.com/cosmos/cosmos-sdk/pull/12132) Change sdk.Int to math.Int, run `gofumpt -w -l .`, and `golangci-lint run ./... --fix`
* (cli) [#12127](https://github.com/cosmos/cosmos-sdk/pull/12127) Fix the CLI not always taking into account `--fee-payer` and `--fee-granter` flags.
* (migrations) [#12028](https://github.com/cosmos/cosmos-sdk/pull/12028) Fix v0.45->v0.46 in-place store migrations.

View File

@ -17,10 +17,13 @@ import (
"github.com/cosmos/cosmos-sdk/types/errors"
"github.com/cosmos/cosmos-sdk/version"
legacybech32 "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32"
legacybech32 "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck // we do old keys, they're keys after all.
)
var flagPubkeyType = "type"
var (
flagPubkeyType = "type"
ed = "ed25519"
)
// Cmd creates a main CLI command
func Cmd() *cobra.Command {
@ -69,7 +72,7 @@ $ %s debug pubkey '{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AurroA7jvfP
}
func bytesToPubkey(bz []byte, keytype string) (cryptotypes.PubKey, bool) {
if keytype == "ed25519" {
if keytype == ed {
if len(bz) == ed25519.PubKeySize {
return &ed25519.PubKey{Key: bz}, true
}
@ -102,17 +105,17 @@ func getPubKeyFromRawString(pkstr string, keytype string) (cryptotypes.PubKey, e
}
}
pk, err := legacybech32.UnmarshalPubKey(legacybech32.AccPK, pkstr)
pk, err := legacybech32.UnmarshalPubKey(legacybech32.AccPK, pkstr) //nolint:staticcheck // we do old keys, they're keys after all.
if err == nil {
return pk, nil
}
pk, err = legacybech32.UnmarshalPubKey(legacybech32.ValPK, pkstr)
pk, err = legacybech32.UnmarshalPubKey(legacybech32.ValPK, pkstr) //nolint:staticcheck // we do old keys, they're keys after all.
if err == nil {
return pk, nil
}
pk, err = legacybech32.UnmarshalPubKey(legacybech32.ConsPK, pkstr)
pk, err = legacybech32.UnmarshalPubKey(legacybech32.ConsPK, pkstr) //nolint:staticcheck // we do old keys, they're keys after all.
if err == nil {
return pk, nil
}
@ -138,7 +141,7 @@ $ %s debug pubkey-raw cosmos1e0jnq2sun3dzjh8p2xq95kk0expwmd7shwjpfg
return err
}
pubkeyType = strings.ToLower(pubkeyType)
if pubkeyType != "secp256k1" && pubkeyType != "ed25519" {
if pubkeyType != "secp256k1" && pubkeyType != ed {
return errors.Wrapf(errors.ErrInvalidType, "invalid pubkey type, expected oneof ed25519 or secp256k1")
}
@ -149,8 +152,8 @@ $ %s debug pubkey-raw cosmos1e0jnq2sun3dzjh8p2xq95kk0expwmd7shwjpfg
var consensusPub string
edPK, ok := pk.(*ed25519.PubKey)
if ok && pubkeyType == "ed25519" {
consensusPub, err = legacybech32.MarshalPubKey(legacybech32.ConsPK, edPK)
if ok && pubkeyType == ed {
consensusPub, err = legacybech32.MarshalPubKey(legacybech32.ConsPK, edPK) //nolint:staticcheck // we do old keys, they're keys after all.
if err != nil {
return err
}
@ -163,11 +166,11 @@ $ %s debug pubkey-raw cosmos1e0jnq2sun3dzjh8p2xq95kk0expwmd7shwjpfg
if err != nil {
return err
}
accPub, err := legacybech32.MarshalPubKey(legacybech32.AccPK, pk)
accPub, err := legacybech32.MarshalPubKey(legacybech32.AccPK, pk) //nolint:staticcheck // we do old keys, they're keys after all.
if err != nil {
return err
}
valPub, err := legacybech32.MarshalPubKey(legacybech32.ValPK, pk)
valPub, err := legacybech32.MarshalPubKey(legacybech32.ValPK, pk) //nolint:staticcheck // we do old keys, they're keys after all.
if err != nil {
return err
}
@ -182,7 +185,7 @@ $ %s debug pubkey-raw cosmos1e0jnq2sun3dzjh8p2xq95kk0expwmd7shwjpfg
return nil
},
}
cmd.Flags().StringP(flagPubkeyType, "t", "ed25519", "Pubkey type to decode (oneof secp256k1, ed25519)")
cmd.Flags().StringP(flagPubkeyType, "t", ed, "Pubkey type to decode (oneof secp256k1, ed25519)")
return cmd
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"strings"
legacyproto "github.com/golang/protobuf/proto"
legacyproto "github.com/golang/protobuf/proto" //nolint:staticcheck // we're aware this is deprecated and using it anyhow.
"google.golang.org/grpc/encoding"
"google.golang.org/protobuf/proto"

View File

@ -29,7 +29,7 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//nolint // this nolint lets us use this file in its original and unmodified form.
package secp256k1
import (

View File

@ -5,6 +5,7 @@
//go:build !gofuzz && cgo
// +build !gofuzz,cgo
//nolint // this nolint lets us use this file in its original and unmodified form.
package secp256k1
import (

View File

@ -5,6 +5,7 @@
//go:build gofuzz || !cgo
// +build gofuzz !cgo
//nolint // this nolint lets us use this file in its original and unmodified form.
package secp256k1
import "math/big"

View File

@ -6,7 +6,7 @@ Databases supporting mappings of arbitrary byte sequences.
The database interface types consist of objects to encapsulate the singular connection to the DB, transactions being made to it, historical version state, and iteration.
### `DBConnection`
### `Connection`
This interface represents a connection to a versioned key-value database. All versioning operations are performed using methods on this type.

View File

@ -1,11 +1,11 @@
package db
type readerRWAdapter struct{ DBReader }
type readerRWAdapter struct{ Reader }
// ReaderAsReadWriter returns a ReadWriter that forwards to a reader and errors if writes are
// attempted. Can be used to pass a Reader when a ReadWriter is expected
// but no writes will actually occur.
func ReaderAsReadWriter(r DBReader) DBReadWriter {
func ReaderAsReadWriter(r Reader) ReadWriter {
return readerRWAdapter{r}
}

View File

@ -22,10 +22,10 @@ import (
var versionsFilename = "versions.csv"
var (
_ db.DBConnection = (*BadgerDB)(nil)
_ db.DBReader = (*badgerTxn)(nil)
_ db.DBWriter = (*badgerWriter)(nil)
_ db.DBReadWriter = (*badgerWriter)(nil)
_ db.Connection = (*BadgerDB)(nil)
_ db.Reader = (*badgerTxn)(nil)
_ db.Writer = (*badgerWriter)(nil)
_ db.ReadWriter = (*badgerWriter)(nil)
)
// BadgerDB is a connection to a BadgerDB key-value database.
@ -164,14 +164,14 @@ func writeVersionsFile(vm *versionManager, path string) error {
return w.WriteAll(rows)
}
func (b *BadgerDB) Reader() db.DBReader {
func (b *BadgerDB) Reader() db.Reader {
b.mtx.RLock()
ts := b.vmgr.lastTs
b.mtx.RUnlock()
return &badgerTxn{txn: b.db.NewTransactionAt(ts, false), db: b}
}
func (b *BadgerDB) ReaderAt(version uint64) (db.DBReader, error) {
func (b *BadgerDB) ReaderAt(version uint64) (db.Reader, error) {
b.mtx.RLock()
defer b.mtx.RUnlock()
ts, has := b.vmgr.versionTs(version)
@ -181,7 +181,7 @@ func (b *BadgerDB) ReaderAt(version uint64) (db.DBReader, error) {
return &badgerTxn{txn: b.db.NewTransactionAt(ts, false), db: b}, nil
}
func (b *BadgerDB) ReadWriter() db.DBReadWriter {
func (b *BadgerDB) ReadWriter() db.ReadWriter {
atomic.AddInt32(&b.openWriters, 1)
b.mtx.RLock()
ts := b.vmgr.lastTs
@ -189,7 +189,7 @@ func (b *BadgerDB) ReadWriter() db.DBReadWriter {
return &badgerWriter{badgerTxn{txn: b.db.NewTransactionAt(ts, true), db: b}, false}
}
func (b *BadgerDB) Writer() db.DBWriter {
func (b *BadgerDB) Writer() db.Writer {
// Badger has a WriteBatch, but it doesn't support conflict detection
return b.ReadWriter()
}
@ -201,7 +201,7 @@ func (b *BadgerDB) Close() error {
return b.db.Close()
}
// Versions implements DBConnection.
// Versions implements Connection.
// Returns a VersionSet that is valid until the next call to SaveVersion or DeleteVersion.
func (b *BadgerDB) Versions() (db.VersionSet, error) {
b.mtx.RLock()
@ -219,12 +219,12 @@ func (b *BadgerDB) save(target uint64) (uint64, error) {
return b.vmgr.Save(target)
}
// SaveNextVersion implements DBConnection.
// SaveNextVersion implements Connection.
func (b *BadgerDB) SaveNextVersion() (uint64, error) {
return b.save(0)
}
// SaveVersion implements DBConnection.
// SaveVersion implements Connection.
func (b *BadgerDB) SaveVersion(target uint64) error {
if target == 0 {
return db.ErrInvalidVersion

View File

@ -9,7 +9,7 @@ import (
"github.com/cosmos/cosmos-sdk/db/dbtest"
)
func load(t *testing.T, dir string) db.DBConnection {
func load(t *testing.T, dir string) db.Connection {
d, err := NewDB(dir)
require.NoError(t, err)
return d

View File

@ -21,7 +21,7 @@ func BytesToInt64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}
func BenchmarkRangeScans(b *testing.B, db dbm.DBReadWriter, dbSize int64) {
func BenchmarkRangeScans(b *testing.B, db dbm.ReadWriter, dbSize int64) {
b.StopTimer()
rangeSize := int64(10000)
@ -40,7 +40,7 @@ func BenchmarkRangeScans(b *testing.B, db dbm.DBReadWriter, dbSize int64) {
b.StartTimer()
for i := 0; i < b.N; i++ {
start := rand.Int63n(dbSize - rangeSize) // nolint: gosec
start := rand.Int63n(dbSize - rangeSize)
end := start + rangeSize
iter, err := db.Iterator(Int64ToBytes(start), Int64ToBytes(end))
require.NoError(b, err)
@ -53,7 +53,7 @@ func BenchmarkRangeScans(b *testing.B, db dbm.DBReadWriter, dbSize int64) {
}
}
func BenchmarkRandomReadsWrites(b *testing.B, db dbm.DBReadWriter) {
func BenchmarkRandomReadsWrites(b *testing.B, db dbm.ReadWriter) {
b.StopTimer()
// create dummy data
@ -67,7 +67,7 @@ func BenchmarkRandomReadsWrites(b *testing.B, db dbm.DBReadWriter) {
for i := 0; i < b.N; i++ {
{
idx := rand.Int63n(numItems) // nolint: gosec
idx := rand.Int63n(numItems)
internal[idx]++
val := internal[idx]
idxBytes := Int64ToBytes(idx)
@ -80,7 +80,7 @@ func BenchmarkRandomReadsWrites(b *testing.B, db dbm.DBReadWriter) {
}
{
idx := rand.Int63n(numItems) // nolint: gosec
idx := rand.Int63n(numItems)
valExp := internal[idx]
idxBytes := Int64ToBytes(idx)
valBytes, err := db.Get(idxBytes)

View File

@ -11,7 +11,7 @@ import (
dbm "github.com/cosmos/cosmos-sdk/db"
)
type Loader func(*testing.T, string) dbm.DBConnection
type Loader func(*testing.T, string) dbm.Connection
func ikey(i int) []byte { return []byte(fmt.Sprintf("key-%03d", i)) }
func ival(i int) []byte { return []byte(fmt.Sprintf("val-%03d", i)) }
@ -20,10 +20,9 @@ func DoTestGetSetHasDelete(t *testing.T, load Loader) {
t.Helper()
db := load(t, t.TempDir())
var txn dbm.DBReadWriter
var view dbm.DBReader
var txn dbm.ReadWriter
view := db.Reader()
view = db.Reader()
require.NotNil(t, view)
// A nonexistent key should return nil.
@ -261,11 +260,11 @@ func DoTestVersioning(t *testing.T, load Loader) {
require.False(t, has)
require.NoError(t, view.Discard())
view, err = db.ReaderAt(versions.Last() + 1)
view, err = db.ReaderAt(versions.Last() + 1) //nolint:staticcheck // we nolint here because we are checking for the absence of an error.
require.Equal(t, dbm.ErrVersionDoesNotExist, err, "should fail to read a nonexistent version")
require.NoError(t, db.DeleteVersion(v2), "should delete version v2")
view, err = db.ReaderAt(v2)
view, err = db.ReaderAt(v2) //nolint:staticcheck // we nolint here because we are checking for the absence of an error.
require.Equal(t, dbm.ErrVersionDoesNotExist, err)
// Ensure latest version is accurate
@ -298,9 +297,9 @@ func DoTestTransactions(t *testing.T, load Loader, multipleWriters bool) {
t.Helper()
db := load(t, t.TempDir())
// Both methods should work in a DBWriter context
writerFuncs := []func() dbm.DBWriter{
writerFuncs := []func() dbm.Writer{
db.Writer,
func() dbm.DBWriter { return db.ReadWriter() },
func() dbm.Writer { return db.ReadWriter() },
}
for _, getWriter := range writerFuncs {
@ -397,7 +396,7 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) {
t.Helper()
dirname := t.TempDir()
db := load(t, dirname)
var txn dbm.DBWriter
var txn dbm.Writer
initContents := func() {
txn = db.Writer()

View File

@ -39,7 +39,7 @@ func AssertKeyPanics(t *testing.T, itr dbm.Iterator) {
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}
func AssertValue(t *testing.T, db dbm.DBReader, key, valueWanted []byte) {
func AssertValue(t *testing.T, db dbm.Reader, key, valueWanted []byte) {
t.Helper()
valueGot, err := db.Get(key)
assert.NoError(t, err)

View File

@ -43,10 +43,10 @@ type dbTxn struct {
type dbWriter struct{ dbTxn }
var (
_ db.DBConnection = (*MemDB)(nil)
_ db.DBReader = (*dbTxn)(nil)
_ db.DBWriter = (*dbWriter)(nil)
_ db.DBReadWriter = (*dbWriter)(nil)
_ db.Connection = (*MemDB)(nil)
_ db.Reader = (*dbTxn)(nil)
_ db.Writer = (*dbWriter)(nil)
_ db.ReadWriter = (*dbWriter)(nil)
)
// item is a btree.Item with byte slices as keys and values
@ -76,23 +76,23 @@ func (dbm *MemDB) Close() error {
return nil
}
// Versions implements DBConnection.
// Versions implements Connection.
func (dbm *MemDB) Versions() (db.VersionSet, error) {
dbm.mtx.RLock()
defer dbm.mtx.RUnlock()
return dbm.vmgr, nil
}
// Reader implements DBConnection.
func (dbm *MemDB) Reader() db.DBReader {
// Reader implements Connection.
func (dbm *MemDB) Reader() db.Reader {
dbm.mtx.RLock()
defer dbm.mtx.RUnlock()
ret := dbm.newTxn(dbm.btree)
return &ret
}
// ReaderAt implements DBConnection.
func (dbm *MemDB) ReaderAt(version uint64) (db.DBReader, error) {
// ReaderAt implements Connection.
func (dbm *MemDB) ReaderAt(version uint64) (db.Reader, error) {
dbm.mtx.RLock()
defer dbm.mtx.RUnlock()
tree, ok := dbm.saved[version]
@ -103,13 +103,13 @@ func (dbm *MemDB) ReaderAt(version uint64) (db.DBReader, error) {
return &ret, nil
}
// Writer implements DBConnection.
func (dbm *MemDB) Writer() db.DBWriter {
// Writer implements Connection.
func (dbm *MemDB) Writer() db.Writer {
return dbm.ReadWriter()
}
// ReadWriter implements DBConnection.
func (dbm *MemDB) ReadWriter() db.DBReadWriter {
// ReadWriter implements Connection.
func (dbm *MemDB) ReadWriter() db.ReadWriter {
dbm.mtx.RLock()
defer dbm.mtx.RUnlock()
atomic.AddInt32(&dbm.openWriters, 1)
@ -134,12 +134,12 @@ func (dbm *MemDB) save(target uint64) (uint64, error) {
return target, nil
}
// SaveVersion implements DBConnection.
// SaveNextVersion implements Connection.
func (dbm *MemDB) SaveNextVersion() (uint64, error) {
return dbm.save(0)
}
// SaveNextVersion implements DBConnection.
// SaveVersion implements Connection.
func (dbm *MemDB) SaveVersion(target uint64) error {
if target == 0 {
return db.ErrInvalidVersion
@ -148,7 +148,7 @@ func (dbm *MemDB) SaveVersion(target uint64) error {
return err
}
// DeleteVersion implements DBConnection.
// DeleteVersion implements Connection.
func (dbm *MemDB) DeleteVersion(target uint64) error {
dbm.mtx.Lock()
defer dbm.mtx.Unlock()
@ -300,7 +300,7 @@ func (dbm *MemDB) Print() error {
return nil
}
// Stats implements DBConnection.
// Stats implements Connection.
func (dbm *MemDB) Stats() map[string]string {
dbm.mtx.RLock()
defer dbm.mtx.RUnlock()

View File

@ -28,7 +28,7 @@ func BenchmarkMemDBRandomReadsWrites(b *testing.B) {
dbtest.BenchmarkRandomReadsWrites(b, dbm.ReadWriter())
}
func load(t *testing.T, _ string) db.DBConnection {
func load(t *testing.T, _ string) db.Connection {
return NewDB()
}

View File

@ -7,51 +7,51 @@ import (
)
// prefixed Reader
type prefixR struct {
db db.DBReader
type Reader struct {
db db.Reader
prefix []byte
}
// prefixed ReadWriter
type prefixRW struct {
db db.DBReadWriter
type ReadWriter struct {
db db.ReadWriter
prefix []byte
}
// prefixed Writer
type prefixW struct {
db db.DBWriter
type Writer struct {
db db.Writer
prefix []byte
}
var (
_ db.DBReader = (*prefixR)(nil)
_ db.DBReadWriter = (*prefixRW)(nil)
_ db.DBWriter = (*prefixW)(nil)
_ db.Reader = (*Reader)(nil)
_ db.ReadWriter = (*ReadWriter)(nil)
_ db.Writer = (*Writer)(nil)
)
// NewPrefixReader returns a DBReader that only has access to the subset of DB keys
// NewReader returns a Reader that only has access to the subset of DB keys
// that contain the given prefix.
func NewPrefixReader(dbr db.DBReader, prefix []byte) prefixR {
return prefixR{
func NewReader(dbr db.Reader, prefix []byte) Reader {
return Reader{
prefix: prefix,
db: dbr,
}
}
// NewPrefixReadWriter returns a DBReader that only has access to the subset of DB keys
// NewReadWriter returns a ReadWriter that only has access to the subset of DB keys
// that contain the given prefix.
func NewPrefixReadWriter(dbrw db.DBReadWriter, prefix []byte) prefixRW {
return prefixRW{
func NewReadWriter(dbrw db.ReadWriter, prefix []byte) ReadWriter {
return ReadWriter{
prefix: prefix,
db: dbrw,
}
}
// NewPrefixWriter returns a DBWriter that reads/writes only from the subset of DB keys
// NewWriter returns a Writer that reads/writes only from the subset of DB keys
// that contain the given prefix
func NewPrefixWriter(dbw db.DBWriter, prefix []byte) prefixW {
return prefixW{
func NewWriter(dbw db.Writer, prefix []byte) Writer {
return Writer{
prefix: prefix,
db: dbw,
}
@ -62,7 +62,7 @@ func prefixed(prefix, key []byte) []byte {
}
// Get implements DBReader.
func (pdb prefixR) Get(key []byte) ([]byte, error) {
func (pdb Reader) Get(key []byte) ([]byte, error) {
if len(key) == 0 {
return nil, db.ErrKeyEmpty
}
@ -70,7 +70,7 @@ func (pdb prefixR) Get(key []byte) ([]byte, error) {
}
// Has implements DBReader.
func (pdb prefixR) Has(key []byte) (bool, error) {
func (pdb Reader) Has(key []byte) (bool, error) {
if len(key) == 0 {
return false, db.ErrKeyEmpty
}
@ -78,7 +78,7 @@ func (pdb prefixR) Has(key []byte) (bool, error) {
}
// Iterator implements DBReader.
func (pdb prefixR) Iterator(start, end []byte) (db.Iterator, error) {
func (pdb Reader) Iterator(start, end []byte) (db.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, db.ErrKeyEmpty
}
@ -97,7 +97,7 @@ func (pdb prefixR) Iterator(start, end []byte) (db.Iterator, error) {
}
// ReverseIterator implements DBReader.
func (pdb prefixR) ReverseIterator(start, end []byte) (db.Iterator, error) {
func (pdb Reader) ReverseIterator(start, end []byte) (db.Iterator, error) {
if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
return nil, db.ErrKeyEmpty
}
@ -116,10 +116,10 @@ func (pdb prefixR) ReverseIterator(start, end []byte) (db.Iterator, error) {
}
// Discard implements DBReader.
func (pdb prefixR) Discard() error { return pdb.db.Discard() }
func (pdb Reader) Discard() error { return pdb.db.Discard() }
// Set implements DBReadWriter.
func (pdb prefixRW) Set(key []byte, value []byte) error {
func (pdb ReadWriter) Set(key []byte, value []byte) error {
if len(key) == 0 {
return db.ErrKeyEmpty
}
@ -127,7 +127,7 @@ func (pdb prefixRW) Set(key []byte, value []byte) error {
}
// Delete implements DBReadWriter.
func (pdb prefixRW) Delete(key []byte) error {
func (pdb ReadWriter) Delete(key []byte) error {
if len(key) == 0 {
return db.ErrKeyEmpty
}
@ -135,33 +135,33 @@ func (pdb prefixRW) Delete(key []byte) error {
}
// Get implements DBReadWriter.
func (pdb prefixRW) Get(key []byte) ([]byte, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Get(key)
func (pdb ReadWriter) Get(key []byte) ([]byte, error) {
return NewReader(pdb.db, pdb.prefix).Get(key)
}
// Has implements DBReadWriter.
func (pdb prefixRW) Has(key []byte) (bool, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Has(key)
func (pdb ReadWriter) Has(key []byte) (bool, error) {
return NewReader(pdb.db, pdb.prefix).Has(key)
}
// Iterator implements DBReadWriter.
func (pdb prefixRW) Iterator(start, end []byte) (db.Iterator, error) {
return NewPrefixReader(pdb.db, pdb.prefix).Iterator(start, end)
func (pdb ReadWriter) Iterator(start, end []byte) (db.Iterator, error) {
return NewReader(pdb.db, pdb.prefix).Iterator(start, end)
}
// ReverseIterator implements DBReadWriter.
func (pdb prefixRW) ReverseIterator(start, end []byte) (db.Iterator, error) {
return NewPrefixReader(pdb.db, pdb.prefix).ReverseIterator(start, end)
func (pdb ReadWriter) ReverseIterator(start, end []byte) (db.Iterator, error) {
return NewReader(pdb.db, pdb.prefix).ReverseIterator(start, end)
}
// Close implements DBReadWriter.
func (pdb prefixRW) Commit() error { return pdb.db.Commit() }
func (pdb ReadWriter) Commit() error { return pdb.db.Commit() }
// Discard implements DBReadWriter.
func (pdb prefixRW) Discard() error { return pdb.db.Discard() }
func (pdb ReadWriter) Discard() error { return pdb.db.Discard() }
// Set implements DBReadWriter.
func (pdb prefixW) Set(key []byte, value []byte) error {
func (pdb Writer) Set(key []byte, value []byte) error {
if len(key) == 0 {
return db.ErrKeyEmpty
}
@ -169,7 +169,7 @@ func (pdb prefixW) Set(key []byte, value []byte) error {
}
// Delete implements DBWriter.
func (pdb prefixW) Delete(key []byte) error {
func (pdb Writer) Delete(key []byte) error {
if len(key) == 0 {
return db.ErrKeyEmpty
}
@ -177,10 +177,10 @@ func (pdb prefixW) Delete(key []byte) error {
}
// Close implements DBWriter.
func (pdb prefixW) Commit() error { return pdb.db.Commit() }
func (pdb Writer) Commit() error { return pdb.db.Commit() }
// Discard implements DBReadWriter.
func (pdb prefixW) Discard() error { return pdb.db.Discard() }
func (pdb Writer) Discard() error { return pdb.db.Discard() }
func cp(bz []byte) (ret []byte) {
ret = make([]byte, len(bz))

View File

@ -9,7 +9,7 @@ import (
// IteratePrefix is a convenience function for iterating over a key domain
// restricted by prefix.
func IteratePrefix(dbr db.DBReader, prefix []byte) (db.Iterator, error) {
func IteratePrefix(dbr db.Reader, prefix []byte) (db.Iterator, error) {
var start, end []byte
if len(prefix) != 0 {
start = prefix

View File

@ -11,7 +11,7 @@ import (
pfx "github.com/cosmos/cosmos-sdk/db/prefix"
)
func fillDBWithStuff(t *testing.T, dbw db.DBWriter) {
func fillDBWithStuff(t *testing.T, dbw db.Writer) {
// Under "key" prefix
require.NoError(t, dbw.Set([]byte("key"), []byte("value")))
require.NoError(t, dbw.Set([]byte("key1"), []byte("value1")))
@ -24,16 +24,16 @@ func fillDBWithStuff(t *testing.T, dbw db.DBWriter) {
require.NoError(t, dbw.Commit())
}
func mockDBWithStuff(t *testing.T) db.DBConnection {
func mockDBWithStuff(t *testing.T) db.Connection {
dbm := memdb.NewDB()
fillDBWithStuff(t, dbm.Writer())
return dbm
}
func makePrefixReader(t *testing.T, dbc db.DBConnection, pre []byte) db.DBReader {
func makePrefixReader(t *testing.T, dbc db.Connection, pre []byte) db.Reader {
view := dbc.Reader()
require.NotNil(t, view)
return pfx.NewPrefixReader(view, pre)
return pfx.NewReader(view, pre)
}
func TestPrefixDBSimple(t *testing.T) {
@ -137,7 +137,7 @@ func TestPrefixDBViewVersion(t *testing.T) {
fillDBWithStuff(t, dbm.Writer())
id, err := dbm.SaveNextVersion()
require.NoError(t, err)
pdb := pfx.NewPrefixReadWriter(dbm.ReadWriter(), prefix)
pdb := pfx.NewReadWriter(dbm.ReadWriter(), prefix)
pdb.Set([]byte("1"), []byte("newvalue1"))
pdb.Delete([]byte("2"))
@ -147,7 +147,7 @@ func TestPrefixDBViewVersion(t *testing.T) {
dbview, err := dbm.ReaderAt(id)
require.NotNil(t, dbview)
require.NoError(t, err)
view := pfx.NewPrefixReader(dbview, prefix)
view := pfx.NewReader(dbview, prefix)
require.NotNil(t, view)
defer view.Discard()

View File

@ -15,7 +15,7 @@ type rocksDBBatch struct {
mgr *dbManager
}
var _ db.DBWriter = (*rocksDBBatch)(nil)
var _ db.Writer = (*rocksDBBatch)(nil)
func (mgr *dbManager) newRocksDBBatch() *rocksDBBatch {
return &rocksDBBatch{
@ -24,7 +24,7 @@ func (mgr *dbManager) newRocksDBBatch() *rocksDBBatch {
}
}
// Set implements DBWriter.
// Set implements Writer.
func (b *rocksDBBatch) Set(key, value []byte) error {
if err := dbutil.ValidateKv(key, value); err != nil {
return err
@ -36,7 +36,7 @@ func (b *rocksDBBatch) Set(key, value []byte) error {
return nil
}
// Delete implements DBWriter.
// Delete implements Writer.
func (b *rocksDBBatch) Delete(key []byte) error {
if len(key) == 0 {
return db.ErrKeyEmpty
@ -48,7 +48,7 @@ func (b *rocksDBBatch) Delete(key []byte) error {
return nil
}
// Write implements DBWriter.
// Write implements Writer.
func (b *rocksDBBatch) Commit() (err error) {
if b.batch == nil {
return db.ErrTransactionClosed
@ -58,7 +58,7 @@ func (b *rocksDBBatch) Commit() (err error) {
return
}
// Close implements DBWriter.
// Close implements Writer.
func (b *rocksDBBatch) Discard() error {
if b.batch != nil {
defer atomic.AddInt32(&b.mgr.openWriters, -1)

View File

@ -22,17 +22,17 @@ var (
)
var (
_ db.DBConnection = (*RocksDB)(nil)
_ db.DBReader = (*dbTxn)(nil)
_ db.DBWriter = (*dbWriter)(nil)
_ db.DBReadWriter = (*dbWriter)(nil)
_ db.Connection = (*RocksDB)(nil)
_ db.Reader = (*dbTxn)(nil)
_ db.Writer = (*dbWriter)(nil)
_ db.ReadWriter = (*dbWriter)(nil)
)
// RocksDB is a connection to a RocksDB key-value database.
type RocksDB = dbManager
type dbManager struct {
current *dbConnection
current *Connection
dir string
opts dbOptions
vmgr *db.VersionManager
@ -42,7 +42,7 @@ type dbManager struct {
cpCache checkpointCache
}
type dbConnection = gorocksdb.OptimisticTransactionDB
type Connection = gorocksdb.OptimisticTransactionDB
type checkpointCache struct {
cache map[uint64]*cpCacheEntry
@ -50,7 +50,7 @@ type checkpointCache struct {
}
type cpCacheEntry struct {
cxn *dbConnection
cxn *Connection
openCount uint
}
@ -158,7 +158,7 @@ func (mgr *dbManager) checkpointPath(version uint64) (string, error) {
return dbPath, nil
}
func (mgr *dbManager) openCheckpoint(version uint64) (*dbConnection, error) {
func (mgr *dbManager) openCheckpoint(version uint64) (*Connection, error) {
mgr.cpCache.mtx.Lock()
defer mgr.cpCache.mtx.Unlock()
cp, has := mgr.cpCache.cache[version]
@ -178,7 +178,7 @@ func (mgr *dbManager) openCheckpoint(version uint64) (*dbConnection, error) {
return db, nil
}
func (mgr *dbManager) Reader() db.DBReader {
func (mgr *dbManager) Reader() db.Reader {
mgr.mtx.RLock()
defer mgr.mtx.RUnlock()
return &dbTxn{
@ -189,7 +189,7 @@ func (mgr *dbManager) Reader() db.DBReader {
}
}
func (mgr *dbManager) ReaderAt(version uint64) (db.DBReader, error) {
func (mgr *dbManager) ReaderAt(version uint64) (db.Reader, error) {
mgr.mtx.RLock()
defer mgr.mtx.RUnlock()
d, err := mgr.openCheckpoint(version)
@ -204,7 +204,7 @@ func (mgr *dbManager) ReaderAt(version uint64) (db.DBReader, error) {
}, nil
}
func (mgr *dbManager) ReadWriter() db.DBReadWriter {
func (mgr *dbManager) ReadWriter() db.ReadWriter {
mgr.mtx.RLock()
defer mgr.mtx.RUnlock()
atomic.AddInt32(&mgr.openWriters, 1)
@ -214,7 +214,7 @@ func (mgr *dbManager) ReadWriter() db.DBReadWriter {
}}
}
func (mgr *dbManager) Writer() db.DBWriter {
func (mgr *dbManager) Writer() db.Writer {
mgr.mtx.RLock()
defer mgr.mtx.RUnlock()
atomic.AddInt32(&mgr.openWriters, 1)
@ -227,12 +227,12 @@ func (mgr *dbManager) Versions() (db.VersionSet, error) {
return mgr.vmgr, nil
}
// SaveNextVersion implements DBConnection.
// SaveNextVersion implements Connection.
func (mgr *dbManager) SaveNextVersion() (uint64, error) {
return mgr.save(0)
}
// SaveVersion implements DBConnection.
// SaveVersion implements Connection.
func (mgr *dbManager) SaveVersion(target uint64) error {
if target == 0 {
return db.ErrInvalidVersion
@ -321,14 +321,14 @@ func (mgr *dbManager) restoreFromCheckpoint(version uint64, path string) error {
return nil
}
// Close implements DBConnection.
// Close implements Connection.
func (mgr *dbManager) Close() error {
mgr.current.Close()
mgr.opts.destroy()
return nil
}
// Stats implements DBConnection.
// Stats implements Connection.
func (mgr *dbManager) Stats() map[string]string {
keys := []string{"rocksdb.stats"}
stats := make(map[string]string, len(keys))
@ -338,7 +338,7 @@ func (mgr *dbManager) Stats() map[string]string {
return stats
}
// Get implements DBReader.
// Get implements Reader.
func (tx *dbTxn) Get(key []byte) ([]byte, error) {
if tx.txn == nil {
return nil, db.ErrTransactionClosed
@ -353,7 +353,7 @@ func (tx *dbTxn) Get(key []byte) ([]byte, error) {
return moveSliceToBytes(res), nil
}
// Get implements DBReader.
// Get implements Reader.
func (tx *dbWriter) Get(key []byte) ([]byte, error) {
if tx.txn == nil {
return nil, db.ErrTransactionClosed

View File

@ -13,7 +13,7 @@ import (
"github.com/cosmos/cosmos-sdk/db/dbtest"
)
func load(t *testing.T, dir string) db.DBConnection {
func load(t *testing.T, dir string) db.Connection {
d, err := NewDB(dir)
require.NoError(t, err)
return d

View File

@ -26,23 +26,23 @@ var (
ErrInvalidVersion = errors.New("invalid version")
)
// DBConnection represents a connection to a versioned database.
// Connection represents a connection to a versioned database.
// Records are accessed via transaction objects, and must be safe for concurrent creation
// and read and write access.
// Past versions are only accessible read-only.
type DBConnection interface {
type Connection interface {
// Reader opens a read-only transaction at the current working version.
Reader() DBReader
Reader() Reader
// ReaderAt opens a read-only transaction at a specified version.
// Returns ErrVersionDoesNotExist for invalid versions.
ReaderAt(uint64) (DBReader, error)
ReaderAt(uint64) (Reader, error)
// ReadWriter opens a read-write transaction at the current version.
ReadWriter() DBReadWriter
ReadWriter() ReadWriter
// Writer opens a write-only transaction at the current version.
Writer() DBWriter
Writer() Writer
// Versions returns all saved versions as an immutable set which is safe for concurrent access.
Versions() (VersionSet, error)
@ -74,7 +74,7 @@ type DBConnection interface {
//
// Keys cannot be nil or empty, while values cannot be nil. Keys and values should be considered
// read-only, both when returned and when given, and must be copied before they are modified.
type DBReader interface {
type Reader interface {
// Get fetches the value of the given key, or nil if it does not exist.
// CONTRACT: key, value readonly []byte
Get([]byte) ([]byte, error)
@ -110,7 +110,7 @@ type DBReader interface {
// Callers must call Commit or Discard when done with the transaction.
//
// This can be used to wrap a write-optimized batch object if provided by the backend implementation.
type DBWriter interface {
type Writer interface {
// Set sets the value for the given key, replacing it if it already exists.
// CONTRACT: key, value readonly []byte
Set([]byte, []byte) error
@ -127,9 +127,9 @@ type DBWriter interface {
}
// DBReadWriter is a transaction interface that allows both reading and writing.
type DBReadWriter interface {
DBReader
DBWriter
type ReadWriter interface {
Reader
Writer
}
// Iterator represents an iterator over a domain of keys. Callers must call Close when done.
@ -138,7 +138,7 @@ type DBReadWriter interface {
//
// Callers must make sure the iterator is valid before calling any methods on it, otherwise
// these methods will panic. This is in part caused by most backend databases using this convention.
// Note that the iterator is invalid on contruction: Next() must be called to initialize it to its
// Note that the iterator is invalid on construction: Next() must be called to initialize it to its
// starting position.
//
// As with DBReader, keys and values should be considered read-only, and must be copied before they are

View File

@ -249,7 +249,7 @@ An interface providing only the basic CRUD functionality (`Get`, `Set`, `Has`, a
This is the new interface (or, set of interfaces) for the main client store, replacing the role of `store/types.MultiStore` (v1). There are a few significant differences in behavior compared with v1:
* Commits are atomic and are performed on the entire store state; individual substores cannot be committed separately and cannot have different version numbers.
* The store's current version and version history track that of the backing `db.DBConnection`. Past versions are accessible read-only.
* The store's current version and version history track that of the backing `db.Connection`. Past versions are accessible read-only.
* The set of valid substores is defined at initialization and cannot be updated dynamically in an existing store instance.
### `CommitMultiStore`

View File

@ -7,18 +7,18 @@ import (
var _ = (*storetypes.Iterator)(nil)
type dbAsStoreIter struct {
type AsStoreIter struct {
dbm.Iterator
valid bool
}
// DBToStoreIterator returns an iterator wrapping the given iterator so that it satisfies the
// (store/types).Iterator interface.
func DBToStoreIterator(source dbm.Iterator) *dbAsStoreIter {
ret := &dbAsStoreIter{Iterator: source}
func ToStoreIterator(source dbm.Iterator) *AsStoreIter {
ret := &AsStoreIter{Iterator: source}
ret.Next() // The DB iterator must be primed before it can access the first element, because Next also returns the validity status
return ret
}
func (it *dbAsStoreIter) Next() { it.valid = it.Iterator.Next() }
func (it *dbAsStoreIter) Valid() bool { return it.valid }
func (it *AsStoreIter) Next() { it.valid = it.Iterator.Next() }
func (it *AsStoreIter) Valid() bool { return it.valid }

View File

@ -7,6 +7,7 @@ import (
"path/filepath"
"github.com/tendermint/tendermint/types"
db "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
@ -21,7 +22,7 @@ import (
// similar to a real app. Make sure rootDir is empty before running the test,
// in order to guarantee consistent results
func NewApp(rootDir string, logger log.Logger) (abci.Application, error) {
db, err := sdk.NewLevelDB("mock", filepath.Join(rootDir, "data"))
db, err := db.NewGoLevelDB("mock", filepath.Join(rootDir, "data"))
if err != nil {
return nil, err
}

View File

@ -16,6 +16,7 @@ import (
abcitypes "github.com/tendermint/tendermint/abci/types"
rosettatypes "github.com/coinbase/rosetta-sdk-go/types"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/metadata"
"github.com/tendermint/tendermint/rpc/client/http"
@ -98,7 +99,7 @@ func NewClient(cfg *Config) (*Client, error) {
// Bootstrap is gonna connect the client to the endpoints
func (c *Client) Bootstrap() error {
grpcConn, err := grpc.Dial(c.config.GRPCEndpoint, grpc.WithInsecure())
grpcConn, err := grpc.Dial(c.config.GRPCEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return err
}

View File

@ -96,7 +96,7 @@ func bindFlags(basename string, cmd *cobra.Command, v *viper.Viper) (err error)
}
})
return
return err
}
// InterceptConfigsPreRunHandler performs a pre-run function for the root daemon

View File

@ -18,16 +18,15 @@ import (
tmtypes "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
bam "github.com/cosmos/cosmos-sdk/baseapp"
"cosmossdk.io/math"
"github.com/cosmos/cosmos-sdk/depinject"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
bam "github.com/cosmos/cosmos-sdk/baseapp"
"github.com/cosmos/cosmos-sdk/client"
codectypes "github.com/cosmos/cosmos-sdk/codec/types"
cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
"github.com/cosmos/cosmos-sdk/depinject"
"github.com/cosmos/cosmos-sdk/server/types"
"github.com/cosmos/cosmos-sdk/simapp/helpers"
"github.com/cosmos/cosmos-sdk/simapp/params"
@ -36,6 +35,7 @@ import (
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/cosmos/cosmos-sdk/types/errors"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
minttypes "github.com/cosmos/cosmos-sdk/x/mint/types"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"

View File

@ -15,7 +15,7 @@ var _ types.KVStore = Store{}
// Wrapper type for dbm.Db with implementation of KVStore
type Store struct {
DB dbm.DBReadWriter
DB dbm.ReadWriter
}
// Get wraps the underlying DB's Get method panicing on error.
@ -59,7 +59,7 @@ func (dsa Store) Iterator(start, end []byte) types.Iterator {
if err != nil {
panic(err)
}
return dbutil.DBToStoreIterator(iter)
return dbutil.ToStoreIterator(iter)
}
// ReverseIterator wraps the underlying DB's ReverseIterator method panicing on error.
@ -68,7 +68,7 @@ func (dsa Store) ReverseIterator(start, end []byte) types.Iterator {
if err != nil {
panic(err)
}
return dbutil.DBToStoreIterator(iter)
return dbutil.ToStoreIterator(iter)
}
// GetStoreType returns the type of the store.

View File

@ -17,7 +17,7 @@ var (
// commits and thus between blocks. State in Memory store is not committed as part of app state but maintained privately by each node
type Store struct {
dbadapter.Store
conn dbm.DBConnection
conn dbm.Connection
}
// NewStore constructs a new in-memory store.

View File

@ -10,7 +10,7 @@
//
// A declared persistent substore is initially empty and stores nothing in the backing DB until a value is set.
// A non-empty store is stored within a prefixed subdomain of the backing DB (using db/prefix).
// If the MultiStore is configured to use a separate DBConnection for StateCommitmentDB, it will store the
// If the MultiStore is configured to use a separate Connection for StateCommitmentDB, it will store the
// state commitment (SC) store (as an SMT) in subdomains there, and the "flat" state is stored in the main DB.
// Each substore's SC is allocated as an independent SMT, and query proofs contain two components: a proof
// of a key's (non)existence within the substore SMT, and a proof of the substore's existence within the

View File

@ -11,7 +11,7 @@ import (
)
// MigrateFromV1 will migrate the state from iavl to smt
func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.DBConnection, storeConfig StoreConfig) (*Store, error) {
func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.Connection, storeConfig StoreConfig) (*Store, error) {
type namedStore struct {
*iavl.Store
name string

View File

@ -35,7 +35,7 @@ func multiStoreConfig(t *testing.T, stores int) StoreConfig {
return opts
}
func newMultiStoreWithGeneratedData(t *testing.T, db dbm.DBConnection, stores int, storeKeys uint64) *Store {
func newMultiStoreWithGeneratedData(t *testing.T, db dbm.Connection, stores int, storeKeys uint64) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)
@ -68,7 +68,7 @@ func newMultiStoreWithGeneratedData(t *testing.T, db dbm.DBConnection, stores in
return store
}
func newMultiStoreWithBasicData(t *testing.T, db dbm.DBConnection, stores int) *Store {
func newMultiStoreWithBasicData(t *testing.T, db dbm.Connection, stores int) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)
@ -85,7 +85,7 @@ func newMultiStoreWithBasicData(t *testing.T, db dbm.DBConnection, stores int) *
return store
}
func newMultiStore(t *testing.T, db dbm.DBConnection, stores int) *Store {
func newMultiStore(t *testing.T, db dbm.Connection, stores int) *Store {
cfg := multiStoreConfig(t, stores)
store, err := NewStore(db, cfg)
require.NoError(t, err)

View File

@ -62,7 +62,7 @@ type StoreConfig struct {
InitialVersion uint64
// The backing DB to use for the state commitment Merkle tree data.
// If nil, Merkle data is stored in the state storage DB under a separate prefix.
StateCommitmentDB dbm.DBConnection
StateCommitmentDB dbm.Connection
prefixRegistry
PersistentCache types.MultiStorePersistentCache
@ -82,10 +82,10 @@ type StoreSchema map[string]types.StoreType
// * The state commitment store of each substore consists of a independent SMT.
// * The state commitment of the root store consists of a Merkle map of all registered persistent substore names to the root hash of their corresponding SMTs
type Store struct {
stateDB dbm.DBConnection
stateTxn dbm.DBReadWriter
StateCommitmentDB dbm.DBConnection
stateCommitmentTxn dbm.DBReadWriter
stateDB dbm.Connection
stateTxn dbm.ReadWriter
StateCommitmentDB dbm.Connection
stateCommitmentTxn dbm.ReadWriter
schema StoreSchema
mem *mem.Store
@ -104,8 +104,8 @@ type Store struct {
type substore struct {
root *Store
name string
dataBucket dbm.DBReadWriter
indexBucket dbm.DBReadWriter
dataBucket dbm.ReadWriter
indexBucket dbm.ReadWriter
stateCommitmentStore *smt.Store
}
@ -118,8 +118,8 @@ type cacheStore struct {
// Read-only store for querying past versions
type viewStore struct {
stateView dbm.DBReader
stateCommitmentView dbm.DBReader
stateView dbm.Reader
stateCommitmentView dbm.Reader
substoreCache map[string]*viewSubstore
schema StoreSchema
}
@ -127,8 +127,8 @@ type viewStore struct {
type viewSubstore struct {
root *viewStore
name string
dataBucket dbm.DBReader
indexBucket dbm.DBReader
dataBucket dbm.Reader
indexBucket dbm.Reader
stateCommitmentStore *smt.Store
}
@ -193,7 +193,7 @@ func (ss StoreSchema) equal(that StoreSchema) bool {
}
// Parses a schema from the DB
func readSavedSchema(bucket dbm.DBReader) (*prefixRegistry, error) {
func readSavedSchema(bucket dbm.Reader) (*prefixRegistry, error) {
ret := prefixRegistry{StoreSchema: StoreSchema{}}
it, err := bucket.Iterator(nil, nil)
if err != nil {
@ -215,7 +215,7 @@ func readSavedSchema(bucket dbm.DBReader) (*prefixRegistry, error) {
// NewStore constructs a MultiStore directly from a database.
// Creates a new store if no data exists; otherwise loads existing data.
func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) {
versions, err := db.Versions()
if err != nil {
return
@ -277,7 +277,7 @@ func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
}
// Now load the substore schema
schemaView := prefixdb.NewPrefixReader(ret.stateDB.Reader(), schemaPrefix)
schemaView := prefixdb.NewReader(ret.stateDB.Reader(), schemaPrefix)
defer func() {
if err != nil {
err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
@ -296,12 +296,11 @@ func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
}
reg.reserved = make([]string, len(opts.reserved))
copy(reg.reserved, opts.reserved)
} else {
if !reg.equal(opts.StoreSchema) {
err = errors.New("loaded schema does not match configured schema")
return
}
} else if !reg.equal(opts.StoreSchema) {
err = errors.New("loaded schema does not match configured schema")
return
}
// Apply migrations, then clear old schema and write the new one
for _, upgrades := range opts.Upgrades {
err = reg.migrate(ret, upgrades)
@ -309,7 +308,7 @@ func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
return
}
}
schemaWriter := prefixdb.NewPrefixWriter(ret.stateTxn, schemaPrefix)
schemaWriter := prefixdb.NewWriter(ret.stateTxn, schemaPrefix)
it, err := schemaView.Iterator(nil, nil)
if err != nil {
return
@ -336,7 +335,7 @@ func NewStore(db dbm.DBConnection, opts StoreConfig) (ret *Store, err error) {
}
}
ret.schema = reg.StoreSchema
return
return ret, err
}
func (s *Store) Close() error {
@ -368,7 +367,7 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
delete(pr.StoreSchema, key)
pfx := substorePrefix(key)
subReader := prefixdb.NewPrefixReader(reader, pfx)
subReader := prefixdb.NewReader(reader, pfx)
it, err := subReader.Iterator(nil, nil)
if err != nil {
return err
@ -378,7 +377,7 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
}
it.Close()
if store.StateCommitmentDB != nil {
subReader = prefixdb.NewPrefixReader(scReader, pfx)
subReader = prefixdb.NewReader(scReader, pfx)
it, err = subReader.Iterator(nil, nil)
if err != nil {
return err
@ -406,8 +405,8 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
oldPrefix := substorePrefix(rename.OldKey)
newPrefix := substorePrefix(rename.NewKey)
subReader := prefixdb.NewPrefixReader(reader, oldPrefix)
subWriter := prefixdb.NewPrefixWriter(store.stateTxn, newPrefix)
subReader := prefixdb.NewReader(reader, oldPrefix)
subWriter := prefixdb.NewWriter(store.stateTxn, newPrefix)
it, err := subReader.Iterator(nil, nil)
if err != nil {
return err
@ -417,8 +416,8 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
}
it.Close()
if store.StateCommitmentDB != nil {
subReader = prefixdb.NewPrefixReader(scReader, oldPrefix)
subWriter = prefixdb.NewPrefixWriter(store.stateCommitmentTxn, newPrefix)
subReader = prefixdb.NewReader(scReader, oldPrefix)
subWriter = prefixdb.NewWriter(store.stateCommitmentTxn, newPrefix)
it, err = subReader.Iterator(nil, nil)
if err != nil {
return err
@ -444,18 +443,18 @@ func substorePrefix(key string) []byte {
}
// GetKVStore implements BasicMultiStore.
func (rs *Store) GetKVStore(skey types.StoreKey) types.KVStore {
func (s *Store) GetKVStore(skey types.StoreKey) types.KVStore {
key := skey.Name()
var parent types.KVStore
typ, has := rs.schema[key]
typ, has := s.schema[key]
if !has {
panic(ErrStoreNotFound(key))
}
switch typ {
case types.StoreTypeMemory:
parent = rs.mem
parent = s.mem
case types.StoreTypeTransient:
parent = rs.tran
parent = s.tran
case types.StoreTypePersistent:
default:
panic(fmt.Errorf("StoreType not supported: %v", typ)) // should never happen
@ -464,27 +463,27 @@ func (rs *Store) GetKVStore(skey types.StoreKey) types.KVStore {
if parent != nil { // store is non-persistent
ret = prefix.NewStore(parent, []byte(key))
} else { // store is persistent
sub, err := rs.getSubstore(key)
sub, err := s.getSubstore(key)
if err != nil {
panic(err)
}
rs.substoreCache[key] = sub
s.substoreCache[key] = sub
ret = sub
}
// Wrap with trace/listen if needed. Note: we don't cache this, so users must get a new substore after
// modifying tracers/listeners.
return rs.wrapTraceListen(ret, skey)
return s.wrapTraceListen(ret, skey)
}
// Gets a persistent substore. This reads, but does not update the substore cache.
// Use it in cases where we need to access a store internally (e.g. read/write Merkle keys, queries)
func (rs *Store) getSubstore(key string) (*substore, error) {
if cached, has := rs.substoreCache[key]; has {
func (s *Store) getSubstore(key string) (*substore, error) {
if cached, has := s.substoreCache[key]; has {
return cached, nil
}
pfx := substorePrefix(key)
stateRW := prefixdb.NewPrefixReadWriter(rs.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewPrefixReadWriter(rs.stateCommitmentTxn, pfx)
stateRW := prefixdb.NewReadWriter(s.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewReadWriter(s.stateCommitmentTxn, pfx)
var stateCommitmentStore *smt.Store
rootHash, err := stateRW.Get(substoreMerkleRootKey)
@ -494,15 +493,15 @@ func (rs *Store) getSubstore(key string) (*substore, error) {
if rootHash != nil {
stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash)
} else {
smtdb := prefixdb.NewPrefixReadWriter(stateCommitmentRW, smtPrefix)
smtdb := prefixdb.NewReadWriter(stateCommitmentRW, smtPrefix)
stateCommitmentStore = smt.NewStore(smtdb)
}
return &substore{
root: rs,
root: s,
name: key,
dataBucket: prefixdb.NewPrefixReadWriter(stateRW, dataPrefix),
indexBucket: prefixdb.NewPrefixReadWriter(stateRW, indexPrefix),
dataBucket: prefixdb.NewReadWriter(stateRW, dataPrefix),
indexBucket: prefixdb.NewReadWriter(stateRW, indexPrefix),
stateCommitmentStore: stateCommitmentStore,
}, nil
}
@ -510,10 +509,10 @@ func (rs *Store) getSubstore(key string) (*substore, error) {
// Resets a substore's state after commit (because root stateTxn has been discarded)
func (s *substore) refresh(rootHash []byte) {
pfx := substorePrefix(s.name)
stateRW := prefixdb.NewPrefixReadWriter(s.root.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewPrefixReadWriter(s.root.stateCommitmentTxn, pfx)
s.dataBucket = prefixdb.NewPrefixReadWriter(stateRW, dataPrefix)
s.indexBucket = prefixdb.NewPrefixReadWriter(stateRW, indexPrefix)
stateRW := prefixdb.NewReadWriter(s.root.stateTxn, pfx)
stateCommitmentRW := prefixdb.NewReadWriter(s.root.stateCommitmentTxn, pfx)
s.dataBucket = prefixdb.NewReadWriter(stateRW, dataPrefix)
s.indexBucket = prefixdb.NewReadWriter(stateRW, indexPrefix)
s.stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash)
}
@ -584,7 +583,7 @@ func (s *Store) commit(target uint64) (id *types.CommitID, err error) {
// Update substore Merkle roots
for key, storeHash := range storeHashes {
pfx := substorePrefix(key)
stateW := prefixdb.NewPrefixReadWriter(s.stateTxn, pfx)
stateW := prefixdb.NewReadWriter(s.stateTxn, pfx)
if err = stateW.Set(substoreMerkleRootKey, storeHash); err != nil {
return
}
@ -671,20 +670,20 @@ func (s *Store) LastCommitID() types.CommitID {
}
// SetInitialVersion implements CommitMultiStore.
func (rs *Store) SetInitialVersion(version uint64) error {
rs.InitialVersion = uint64(version)
func (s *Store) SetInitialVersion(version uint64) error {
s.InitialVersion = version
return nil
}
// GetVersion implements CommitMultiStore.
func (rs *Store) GetVersion(version int64) (types.BasicMultiStore, error) {
return rs.getView(version)
func (s *Store) GetVersion(version int64) (types.BasicMultiStore, error) {
return s.getView(version)
}
// CacheMultiStore implements BasicMultiStore.
func (rs *Store) CacheMultiStore() types.CacheMultiStore {
func (s *Store) CacheMultiStore() types.CacheMultiStore {
return &cacheStore{
source: rs,
source: s,
substores: map[string]types.CacheKVStore{},
traceListenMixin: newTraceListenMixin(),
}
@ -694,13 +693,13 @@ func (rs *Store) CacheMultiStore() types.CacheMultiStore {
// If PruneNothing, this is a no-op.
// If other strategy, this height is persisted until it is
// less than <current height> - KeepRecent and <current height> % Interval == 0
func (rs *Store) PruneSnapshotHeight(height int64) {
func (s *Store) PruneSnapshotHeight(height int64) {
panic("not implemented")
}
// SetSnapshotInterval sets the interval at which the snapshots are taken.
// It is used by the store to determine which heights to retain until after the snapshot is complete.
func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) {
func (s *Store) SetSnapshotInterval(snapshotInterval uint64) {
panic("not implemented")
}
@ -729,7 +728,7 @@ func parsePath(path string) (storeName string, subpath string, err error) {
// If latest-1 is not present, use latest (which must be present)
// if you care to have the latest data to see a tx results, you must
// explicitly set the height you want to see
func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
func (s *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
if len(req.Data) == 0 {
return sdkerrors.QueryResult(sdkerrors.Wrap(sdkerrors.ErrTxDecode, "query cannot be zero length"), false)
}
@ -737,7 +736,7 @@ func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
// if height is 0, use the latest height
height := req.Height
if height == 0 {
versions, err := rs.stateDB.Versions()
versions, err := s.stateDB.Versions()
if err != nil {
return sdkerrors.QueryResult(errors.New("failed to get version info"), false)
}
@ -757,7 +756,7 @@ func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
if err != nil {
return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to parse path"), false)
}
view, err := rs.getView(height)
view, err := s.getView(height)
if err != nil {
if errors.Is(err, dbm.ErrVersionDoesNotExist) {
err = sdkerrors.ErrInvalidHeight
@ -765,7 +764,7 @@ func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access height"), false)
}
if _, has := rs.schema[storeName]; !has {
if _, has := s.schema[storeName]; !has {
return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName), false)
}
substore, err := view.getSubstore(storeName)
@ -816,8 +815,8 @@ func (rs *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) {
return res
}
func loadSMT(stateCommitmentTxn dbm.DBReadWriter, root []byte) *smt.Store {
smtdb := prefixdb.NewPrefixReadWriter(stateCommitmentTxn, smtPrefix)
func loadSMT(stateCommitmentTxn dbm.ReadWriter, root []byte) *smt.Store {
smtdb := prefixdb.NewReadWriter(stateCommitmentTxn, smtPrefix)
return smt.LoadStore(smtdb, root)
}

View File

@ -47,7 +47,7 @@ func storeConfig123(t *testing.T) StoreConfig {
return opts
}
func newSubStoreWithData(t *testing.T, db dbm.DBConnection, storeData map[string]string) (*Store, types.KVStore) {
func newSubStoreWithData(t *testing.T, db dbm.Connection, storeData map[string]string) (*Store, types.KVStore) {
root, err := NewStore(db, simpleStoreConfig(t))
require.NoError(t, err)
@ -259,7 +259,7 @@ func TestCommit(t *testing.T) {
// test that we can recover from a failed commit
testFailedCommit := func(t *testing.T,
store *Store,
db dbm.DBConnection,
db dbm.Connection,
opts StoreConfig,
) {
if db == nil {
@ -299,7 +299,7 @@ func TestCommit(t *testing.T) {
// committed data that belongs to no version: non-atomic behavior from the Store user's perspective.
// So, that data must be reverted when the store is reloaded.
t.Run("recover after failed SaveVersion and Revert", func(t *testing.T) {
var db dbm.DBConnection
var db dbm.Connection
db = dbSaveVersionFails{memdb.NewDB()}
// Revert should succeed in initial NewStore call, but fail during Commit
db = dbRevertFails{db, []bool{false, true}}
@ -315,7 +315,7 @@ func TestCommit(t *testing.T) {
testFailedCommit(t, store, nil, opts)
})
t.Run("recover after failed StateCommitmentDB SaveVersion and Revert", func(t *testing.T) {
var db dbm.DBConnection
var db dbm.Connection
db = dbSaveVersionFails{memdb.NewDB()}
db = dbRevertFails{db, []bool{false, true}}
opts.StateCommitmentDB = db
@ -406,7 +406,7 @@ func TestPruning(t *testing.T) {
}
for tci, tc := range testCases {
dbs := []dbm.DBConnection{memdb.NewDB(), memdb.NewDB()}
dbs := []dbm.Connection{memdb.NewDB(), memdb.NewDB()}
opts := simpleStoreConfig(t)
opts.Pruning = tc.PruningOptions
opts.StateCommitmentDB = dbs[1]

View File

@ -73,7 +73,7 @@ type contentsIterator struct {
func (s *substore) newSubstoreIterator(source dbm.Iterator) *contentsIterator {
locker := s.root.mtx.RLocker()
locker.Lock()
return &contentsIterator{dbutil.DBToStoreIterator(source), locker}
return &contentsIterator{dbutil.ToStoreIterator(source), locker}
}
func (it *contentsIterator) Close() error {

View File

@ -8,34 +8,34 @@ import (
)
type (
dbDeleteVersionFails struct{ dbm.DBConnection }
dbRWCommitFails struct{ dbm.DBConnection }
dbRWCrudFails struct{ dbm.DBConnection }
dbSaveVersionFails struct{ dbm.DBConnection }
dbDeleteVersionFails struct{ dbm.Connection }
dbRWCommitFails struct{ dbm.Connection }
dbRWCrudFails struct{ dbm.Connection }
dbSaveVersionFails struct{ dbm.Connection }
dbRevertFails struct {
dbm.DBConnection
dbm.Connection
// order of calls to fail on (eg. [1, 0] => first call fails; second succeeds)
failOn []bool
}
)
type dbVersionsIs struct {
dbm.DBConnection
dbm.Connection
vset dbm.VersionSet
}
type (
dbVersionsFails struct{ dbm.DBConnection }
rwCommitFails struct{ dbm.DBReadWriter }
dbVersionsFails struct{ dbm.Connection }
rwCommitFails struct{ dbm.ReadWriter }
rwCrudFails struct {
dbm.DBReadWriter
dbm.ReadWriter
onKey []byte
}
)
func (dbVersionsFails) Versions() (dbm.VersionSet, error) { return nil, errors.New("dbVersionsFails") }
func (db dbVersionsIs) Versions() (dbm.VersionSet, error) { return db.vset, nil }
func (db dbRWCrudFails) ReadWriter() dbm.DBReadWriter {
return rwCrudFails{db.DBConnection.ReadWriter(), nil}
func (db dbRWCrudFails) ReadWriter() dbm.ReadWriter {
return rwCrudFails{db.Connection.ReadWriter(), nil}
}
func (dbSaveVersionFails) SaveVersion(uint64) error { return errors.New("dbSaveVersionFails") }
func (db dbRevertFails) Revert() error {
@ -46,7 +46,7 @@ func (db dbRevertFails) Revert() error {
if fail {
return errors.New("dbRevertFails")
}
return db.DBConnection.Revert()
return db.Connection.Revert()
}
func (dbDeleteVersionFails) DeleteVersion(uint64) error { return errors.New("dbDeleteVersionFails") }
func (tx rwCommitFails) Commit() error {
@ -54,34 +54,34 @@ func (tx rwCommitFails) Commit() error {
return errors.New("rwCommitFails")
}
func (db dbRWCommitFails) ReadWriter() dbm.DBReadWriter {
return rwCommitFails{db.DBConnection.ReadWriter()}
func (db dbRWCommitFails) ReadWriter() dbm.ReadWriter {
return rwCommitFails{db.Connection.ReadWriter()}
}
func (rw rwCrudFails) Get(k []byte) ([]byte, error) {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return nil, errors.New("rwCrudFails.Get")
}
return rw.DBReadWriter.Get(k)
return rw.ReadWriter.Get(k)
}
func (rw rwCrudFails) Has(k []byte) (bool, error) {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return false, errors.New("rwCrudFails.Has")
}
return rw.DBReadWriter.Has(k)
return rw.ReadWriter.Has(k)
}
func (rw rwCrudFails) Set(k []byte, v []byte) error {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return errors.New("rwCrudFails.Set")
}
return rw.DBReadWriter.Set(k, v)
return rw.ReadWriter.Set(k, v)
}
func (rw rwCrudFails) Delete(k []byte) error {
if rw.onKey == nil || bytes.Equal(rw.onKey, k) {
return errors.New("rwCrudFails.Delete")
}
return rw.DBReadWriter.Delete(k)
return rw.ReadWriter.Delete(k)
}

View File

@ -55,7 +55,7 @@ func (s *viewSubstore) Iterator(start, end []byte) types.Iterator {
if err != nil {
panic(err)
}
return dbutil.DBToStoreIterator(iter)
return dbutil.ToStoreIterator(iter)
}
// ReverseIterator implements KVStore.
@ -64,7 +64,7 @@ func (s *viewSubstore) ReverseIterator(start, end []byte) types.Iterator {
if err != nil {
panic(err)
}
return dbutil.DBToStoreIterator(iter)
return dbutil.ToStoreIterator(iter)
}
// GetStoreType implements Store.
@ -72,16 +72,16 @@ func (s *viewSubstore) GetStoreType() types.StoreType {
return types.StoreTypePersistent
}
func (st *viewSubstore) CacheWrap() types.CacheWrap {
return cachekv.NewStore(st)
func (s *viewSubstore) CacheWrap() types.CacheWrap {
return cachekv.NewStore(s)
}
func (st *viewSubstore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(st, w, tc))
func (s *viewSubstore) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap {
return cachekv.NewStore(tracekv.NewStore(s, w, tc))
}
func (st *viewSubstore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(st, storeKey, listeners))
func (s *viewSubstore) CacheWrapWithListeners(storeKey types.StoreKey, listeners []types.WriteListener) types.CacheWrap {
return cachekv.NewStore(listenkv.NewStore(s, storeKey, listeners))
}
func (s *viewStore) getMerkleRoots() (ret map[string][]byte, err error) {
@ -123,7 +123,7 @@ func (store *Store) getView(version int64) (ret *viewStore, err error) {
}()
}
// Now read this version's schema
schemaView := prefixdb.NewPrefixReader(stateView, schemaPrefix)
schemaView := prefixdb.NewReader(stateView, schemaPrefix)
defer func() {
if err != nil {
err = util.CombineErrors(err, schemaView.Discard(), "schemaView.Discard also failed")
@ -140,39 +140,39 @@ func (store *Store) getView(version int64) (ret *viewStore, err error) {
substoreCache: map[string]*viewSubstore{},
schema: pr.StoreSchema,
}
return
return ret, err
}
func (vs *viewStore) GetKVStore(skey types.StoreKey) types.KVStore {
func (s *viewStore) GetKVStore(skey types.StoreKey) types.KVStore {
key := skey.Name()
if _, has := vs.schema[key]; !has {
if _, has := s.schema[key]; !has {
panic(ErrStoreNotFound(key))
}
ret, err := vs.getSubstore(key)
ret, err := s.getSubstore(key)
if err != nil {
panic(err)
}
vs.substoreCache[key] = ret
s.substoreCache[key] = ret
return ret
}
// Reads but does not update substore cache
func (vs *viewStore) getSubstore(key string) (*viewSubstore, error) {
if cached, has := vs.substoreCache[key]; has {
func (s *viewStore) getSubstore(key string) (*viewSubstore, error) {
if cached, has := s.substoreCache[key]; has {
return cached, nil
}
pfx := substorePrefix(key)
stateR := prefixdb.NewPrefixReader(vs.stateView, pfx)
stateCommitmentR := prefixdb.NewPrefixReader(vs.stateCommitmentView, pfx)
stateR := prefixdb.NewReader(s.stateView, pfx)
stateCommitmentR := prefixdb.NewReader(s.stateCommitmentView, pfx)
rootHash, err := stateR.Get(merkleRootKey)
if err != nil {
return nil, err
}
return &viewSubstore{
root: vs,
root: s,
name: key,
dataBucket: prefixdb.NewPrefixReader(stateR, dataPrefix),
indexBucket: prefixdb.NewPrefixReader(stateR, indexPrefix),
dataBucket: prefixdb.NewReader(stateR, dataPrefix),
indexBucket: prefixdb.NewReader(stateR, indexPrefix),
stateCommitmentStore: loadSMT(dbm.ReaderAsReadWriter(stateCommitmentR), rootHash),
}, nil
}

View File

@ -31,7 +31,7 @@ func createIcs23Proof(store *Store, key []byte) (*ics23.CommitmentProof, error)
if err != nil {
return nil, err
}
ret.Proof = &ics23.CommitmentProof_Exist{&ics23.ExistenceProof{
ret.Proof = &ics23.CommitmentProof_Exist{Exist: &ics23.ExistenceProof{
Key: path[:],
Value: value,
Leaf: ics23.SmtSpec.LeafSpec,
@ -42,7 +42,7 @@ func createIcs23Proof(store *Store, key []byte) (*ics23.CommitmentProof, error)
if err != nil {
return nil, err
}
ret.Proof = &ics23.CommitmentProof_Nonexist{nonexist}
ret.Proof = &ics23.CommitmentProof_Nonexist{Nonexist: nonexist}
}
return ret, nil
}

View File

@ -30,19 +30,19 @@ var (
// Store Implements types.KVStore and CommitKVStore.
type Store struct {
tree *smt.SparseMerkleTree
values dbm.DBReadWriter
values dbm.ReadWriter
// Map hashed keys back to preimage
preimages dbm.DBReadWriter
preimages dbm.ReadWriter
}
// An smt.MapStore that wraps Get to raise smt.InvalidKeyError;
// smt.SparseMerkleTree expects this error to be returned when a key is not found
type dbMapStore struct{ dbm.DBReadWriter }
type dbMapStore struct{ dbm.ReadWriter }
func NewStore(db dbm.DBReadWriter) *Store {
nodes := prefix.NewPrefixReadWriter(db, nodesPrefix)
values := prefix.NewPrefixReadWriter(db, valuesPrefix)
preimages := prefix.NewPrefixReadWriter(db, preimagesPrefix)
func NewStore(db dbm.ReadWriter) *Store {
nodes := prefix.NewReadWriter(db, nodesPrefix)
values := prefix.NewReadWriter(db, valuesPrefix)
preimages := prefix.NewReadWriter(db, preimagesPrefix)
return &Store{
tree: smt.NewSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New()),
values: values,
@ -50,10 +50,10 @@ func NewStore(db dbm.DBReadWriter) *Store {
}
}
func LoadStore(db dbm.DBReadWriter, root []byte) *Store {
nodes := prefix.NewPrefixReadWriter(db, nodesPrefix)
values := prefix.NewPrefixReadWriter(db, valuesPrefix)
preimages := prefix.NewPrefixReadWriter(db, preimagesPrefix)
func LoadStore(db dbm.ReadWriter, root []byte) *Store {
nodes := prefix.NewReadWriter(db, nodesPrefix)
values := prefix.NewReadWriter(db, valuesPrefix)
preimages := prefix.NewReadWriter(db, preimagesPrefix)
return &Store{
tree: smt.ImportSparseMerkleTree(dbMapStore{nodes}, dbMapStore{values}, sha256.New(), root),
values: values,
@ -132,12 +132,12 @@ func (s *Store) Delete(key []byte) {
}
func (ms dbMapStore) Get(key []byte) ([]byte, error) {
val, err := ms.DBReadWriter.Get(key)
val, err := ms.ReadWriter.Get(key)
if err != nil {
return nil, err
}
if val == nil {
return nil, &smt.InvalidKeyError{key}
return nil, &smt.InvalidKeyError{Key: key}
}
return val, nil
}

View File

@ -16,7 +16,7 @@ var (
// Store is a wrapper for a memory store which does not persist data.
type Store struct {
dbadapter.Store
conn dbm.DBConnection
conn dbm.Connection
}
// NewStore constructs a new transient store.

View File

@ -11,31 +11,31 @@ import (
gomock "github.com/golang/mock/gomock"
)
// MockDBConnection is a mock of DBConnection interface.
type MockDBConnection struct {
// MockConnection is a mock of Connection interface.
type MockConnection struct {
ctrl *gomock.Controller
recorder *MockDBConnectionMockRecorder
recorder *MockConnectionMockRecorder
}
// MockDBConnectionMockRecorder is the mock recorder for MockDBConnection.
type MockDBConnectionMockRecorder struct {
mock *MockDBConnection
// MockConnectionMockRecorder is the mock recorder for MockConnection.
type MockConnectionMockRecorder struct {
mock *MockConnection
}
// NewMockDBConnection creates a new mock instance.
func NewMockDBConnection(ctrl *gomock.Controller) *MockDBConnection {
mock := &MockDBConnection{ctrl: ctrl}
mock.recorder = &MockDBConnectionMockRecorder{mock}
// NewMockConnection creates a new mock instance.
func NewMockConnection(ctrl *gomock.Controller) *MockConnection {
mock := &MockConnection{ctrl: ctrl}
mock.recorder = &MockConnectionMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockDBConnection) EXPECT() *MockDBConnectionMockRecorder {
func (m *MockConnection) EXPECT() *MockConnectionMockRecorder {
return m.recorder
}
// Close mocks base method.
func (m *MockDBConnection) Close() error {
func (m *MockConnection) Close() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Close")
ret0, _ := ret[0].(error)
@ -43,13 +43,13 @@ func (m *MockDBConnection) Close() error {
}
// Close indicates an expected call of Close.
func (mr *MockDBConnectionMockRecorder) Close() *gomock.Call {
func (mr *MockConnectionMockRecorder) Close() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockDBConnection)(nil).Close))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConnection)(nil).Close))
}
// DeleteVersion mocks base method.
func (m *MockDBConnection) DeleteVersion(arg0 uint64) error {
func (m *MockConnection) DeleteVersion(arg0 uint64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DeleteVersion", arg0)
ret0, _ := ret[0].(error)
@ -57,56 +57,56 @@ func (m *MockDBConnection) DeleteVersion(arg0 uint64) error {
}
// DeleteVersion indicates an expected call of DeleteVersion.
func (mr *MockDBConnectionMockRecorder) DeleteVersion(arg0 interface{}) *gomock.Call {
func (mr *MockConnectionMockRecorder) DeleteVersion(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVersion", reflect.TypeOf((*MockDBConnection)(nil).DeleteVersion), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVersion", reflect.TypeOf((*MockConnection)(nil).DeleteVersion), arg0)
}
// ReadWriter mocks base method.
func (m *MockDBConnection) ReadWriter() db.DBReadWriter {
func (m *MockConnection) ReadWriter() db.ReadWriter {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReadWriter")
ret0, _ := ret[0].(db.DBReadWriter)
ret0, _ := ret[0].(db.ReadWriter)
return ret0
}
// ReadWriter indicates an expected call of ReadWriter.
func (mr *MockDBConnectionMockRecorder) ReadWriter() *gomock.Call {
func (mr *MockConnectionMockRecorder) ReadWriter() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadWriter", reflect.TypeOf((*MockDBConnection)(nil).ReadWriter))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadWriter", reflect.TypeOf((*MockConnection)(nil).ReadWriter))
}
// Reader mocks base method.
func (m *MockDBConnection) Reader() db.DBReader {
func (m *MockConnection) Reader() db.Reader {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Reader")
ret0, _ := ret[0].(db.DBReader)
ret0, _ := ret[0].(db.Reader)
return ret0
}
// Reader indicates an expected call of Reader.
func (mr *MockDBConnectionMockRecorder) Reader() *gomock.Call {
func (mr *MockConnectionMockRecorder) Reader() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockDBConnection)(nil).Reader))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockConnection)(nil).Reader))
}
// ReaderAt mocks base method.
func (m *MockDBConnection) ReaderAt(arg0 uint64) (db.DBReader, error) {
func (m *MockConnection) ReaderAt(arg0 uint64) (db.Reader, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "ReaderAt", arg0)
ret0, _ := ret[0].(db.DBReader)
ret0, _ := ret[0].(db.Reader)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ReaderAt indicates an expected call of ReaderAt.
func (mr *MockDBConnectionMockRecorder) ReaderAt(arg0 interface{}) *gomock.Call {
func (mr *MockConnectionMockRecorder) ReaderAt(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReaderAt", reflect.TypeOf((*MockDBConnection)(nil).ReaderAt), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReaderAt", reflect.TypeOf((*MockConnection)(nil).ReaderAt), arg0)
}
// Revert mocks base method.
func (m *MockDBConnection) Revert() error {
func (m *MockConnection) Revert() error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Revert")
ret0, _ := ret[0].(error)
@ -114,13 +114,13 @@ func (m *MockDBConnection) Revert() error {
}
// Revert indicates an expected call of Revert.
func (mr *MockDBConnectionMockRecorder) Revert() *gomock.Call {
func (mr *MockConnectionMockRecorder) Revert() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revert", reflect.TypeOf((*MockDBConnection)(nil).Revert))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Revert", reflect.TypeOf((*MockConnection)(nil).Revert))
}
// SaveNextVersion mocks base method.
func (m *MockDBConnection) SaveNextVersion() (uint64, error) {
func (m *MockConnection) SaveNextVersion() (uint64, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveNextVersion")
ret0, _ := ret[0].(uint64)
@ -129,13 +129,13 @@ func (m *MockDBConnection) SaveNextVersion() (uint64, error) {
}
// SaveNextVersion indicates an expected call of SaveNextVersion.
func (mr *MockDBConnectionMockRecorder) SaveNextVersion() *gomock.Call {
func (mr *MockConnectionMockRecorder) SaveNextVersion() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNextVersion", reflect.TypeOf((*MockDBConnection)(nil).SaveNextVersion))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveNextVersion", reflect.TypeOf((*MockConnection)(nil).SaveNextVersion))
}
// SaveVersion mocks base method.
func (m *MockDBConnection) SaveVersion(arg0 uint64) error {
func (m *MockConnection) SaveVersion(arg0 uint64) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "SaveVersion", arg0)
ret0, _ := ret[0].(error)
@ -143,13 +143,13 @@ func (m *MockDBConnection) SaveVersion(arg0 uint64) error {
}
// SaveVersion indicates an expected call of SaveVersion.
func (mr *MockDBConnectionMockRecorder) SaveVersion(arg0 interface{}) *gomock.Call {
func (mr *MockConnectionMockRecorder) SaveVersion(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveVersion", reflect.TypeOf((*MockDBConnection)(nil).SaveVersion), arg0)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SaveVersion", reflect.TypeOf((*MockConnection)(nil).SaveVersion), arg0)
}
// Versions mocks base method.
func (m *MockDBConnection) Versions() (db.VersionSet, error) {
func (m *MockConnection) Versions() (db.VersionSet, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Versions")
ret0, _ := ret[0].(db.VersionSet)
@ -158,23 +158,23 @@ func (m *MockDBConnection) Versions() (db.VersionSet, error) {
}
// Versions indicates an expected call of Versions.
func (mr *MockDBConnectionMockRecorder) Versions() *gomock.Call {
func (mr *MockConnectionMockRecorder) Versions() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Versions", reflect.TypeOf((*MockDBConnection)(nil).Versions))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Versions", reflect.TypeOf((*MockConnection)(nil).Versions))
}
// Writer mocks base method.
func (m *MockDBConnection) Writer() db.DBWriter {
func (m *MockConnection) Writer() db.Writer {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Writer")
ret0, _ := ret[0].(db.DBWriter)
ret0, _ := ret[0].(db.Writer)
return ret0
}
// Writer indicates an expected call of Writer.
func (mr *MockDBConnectionMockRecorder) Writer() *gomock.Call {
func (mr *MockConnectionMockRecorder) Writer() *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Writer", reflect.TypeOf((*MockDBConnection)(nil).Writer))
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Writer", reflect.TypeOf((*MockConnection)(nil).Writer))
}
// MockDBReader is a mock of DBReader interface.

View File

@ -33,7 +33,7 @@ func RegisterInterfaces(registry types.InterfaceRegistry) {
&HasHasAnimal{},
)
registry.RegisterImplementations(
(*tx.TxExtensionOptionI)(nil),
(*tx.ExtensionOptionI)(nil),
&Cat{},
)

View File

@ -195,7 +195,7 @@ func (aa AccAddress) Equals(aa2 Address) bool {
// Returns boolean for whether an AccAddress is empty
func (aa AccAddress) Empty() bool {
return aa == nil || len(aa) == 0
return len(aa) == 0
}
// Marshal returns the raw address bytes. It is needed for protobuf
@ -343,7 +343,7 @@ func (va ValAddress) Equals(va2 Address) bool {
// Returns boolean for whether an AccAddress is empty
func (va ValAddress) Empty() bool {
return va == nil || len(va) == 0
return len(va) == 0
}
// Marshal returns the raw address bytes. It is needed for protobuf
@ -498,7 +498,7 @@ func (ca ConsAddress) Equals(ca2 Address) bool {
// Returns boolean for whether an ConsAddress is empty
func (ca ConsAddress) Empty() bool {
return ca == nil || len(ca) == 0
return len(ca) == 0
}
// Marshal returns the raw address bytes. It is needed for protobuf

View File

@ -5,12 +5,12 @@ import (
)
// TxExtensionOptionI defines the interface for tx extension options
type TxExtensionOptionI interface{}
type ExtensionOptionI interface{}
// unpackTxExtensionOptionsI unpacks Any's to TxExtensionOptionI's.
func unpackTxExtensionOptionsI(unpacker types.AnyUnpacker, anys []*types.Any) error {
for _, any := range anys {
var opt TxExtensionOptionI
var opt ExtensionOptionI
err := unpacker.UnpackAny(any, &opt)
if err != nil {
return err

View File

@ -215,5 +215,5 @@ func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
registry.RegisterInterface("cosmos.tx.v1beta1.Tx", (*sdk.Tx)(nil))
registry.RegisterImplementations((*sdk.Tx)(nil), &Tx{})
registry.RegisterInterface("cosmos.tx.v1beta1.TxExtensionOptionI", (*TxExtensionOptionI)(nil))
registry.RegisterInterface("cosmos.tx.v1beta1.TxExtensionOptionI", (*ExtensionOptionI)(nil))
}

View File

@ -57,7 +57,7 @@ func (dfd DeductFeeDecorator) checkDeductFee(ctx sdk.Context, sdkTx sdk.Tx, fee
}
if addr := dfd.accountKeeper.GetModuleAddress(types.FeeCollectorName); addr == nil {
return fmt.Errorf("Fee collector module account (%s) has not been set", types.FeeCollectorName)
return fmt.Errorf("fee collector module account (%s) has not been set", types.FeeCollectorName)
}
feePayer := feeTx.FeePayer()

View File

@ -106,6 +106,7 @@ func (tx StdTx) GetMsgs() []sdk.Msg { return tx.Msgs }
// ValidateBasic does a simple and lightweight validation check that doesn't
// require access to any other information.
//nolint:revive // we need to change the receiver name here, because otherwise we conflict with tx.MaxGasWanted.
func (stdTx StdTx) ValidateBasic() error {
stdSigs := stdTx.GetSignatures()

View File

@ -259,15 +259,15 @@ func (k Keeper) IterateGrants(ctx sdk.Context,
}
}
func (keeper Keeper) getGrantQueueItem(ctx sdk.Context, expiration time.Time, granter, grantee sdk.AccAddress) (*authz.GrantQueueItem, error) {
store := ctx.KVStore(keeper.storeKey)
func (k Keeper) getGrantQueueItem(ctx sdk.Context, expiration time.Time, granter, grantee sdk.AccAddress) (*authz.GrantQueueItem, error) {
store := ctx.KVStore(k.storeKey)
bz := store.Get(GrantQueueKey(expiration, granter, grantee))
if bz == nil {
return &authz.GrantQueueItem{}, nil
}
var queueItems authz.GrantQueueItem
if err := keeper.cdc.Unmarshal(bz, &queueItems); err != nil {
if err := k.cdc.Unmarshal(bz, &queueItems); err != nil {
return nil, err
}
return &queueItems, nil
@ -287,27 +287,27 @@ func (k Keeper) setGrantQueueItem(ctx sdk.Context, expiration time.Time,
}
// insertIntoGrantQueue inserts a grant key into the grant queue
func (keeper Keeper) insertIntoGrantQueue(ctx sdk.Context, granter, grantee sdk.AccAddress, msgType string, expiration time.Time) error {
queueItems, err := keeper.getGrantQueueItem(ctx, expiration, granter, grantee)
func (k Keeper) insertIntoGrantQueue(ctx sdk.Context, granter, grantee sdk.AccAddress, msgType string, expiration time.Time) error {
queueItems, err := k.getGrantQueueItem(ctx, expiration, granter, grantee)
if err != nil {
return err
}
if len(queueItems.MsgTypeUrls) == 0 {
keeper.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
k.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
MsgTypeUrls: []string{msgType},
})
} else {
queueItems.MsgTypeUrls = append(queueItems.MsgTypeUrls, msgType)
keeper.setGrantQueueItem(ctx, expiration, granter, grantee, queueItems)
k.setGrantQueueItem(ctx, expiration, granter, grantee, queueItems)
}
return nil
}
// removeFromGrantQueue removes a grant key from the grant queue
func (keeper Keeper) removeFromGrantQueue(ctx sdk.Context, grantKey []byte, granter, grantee sdk.AccAddress, expiration time.Time) error {
store := ctx.KVStore(keeper.storeKey)
func (k Keeper) removeFromGrantQueue(ctx sdk.Context, grantKey []byte, granter, grantee sdk.AccAddress, expiration time.Time) error {
store := ctx.KVStore(k.storeKey)
key := GrantQueueKey(expiration, granter, grantee)
bz := store.Get(key)
if bz == nil {
@ -315,7 +315,7 @@ func (keeper Keeper) removeFromGrantQueue(ctx sdk.Context, grantKey []byte, gran
}
var queueItem authz.GrantQueueItem
if err := keeper.cdc.Unmarshal(bz, &queueItem); err != nil {
if err := k.cdc.Unmarshal(bz, &queueItem); err != nil {
return err
}
@ -330,7 +330,7 @@ func (keeper Keeper) removeFromGrantQueue(ctx sdk.Context, grantKey []byte, gran
queueItems[index] = queueItems[end]
queueItems = queueItems[:end]
if err := keeper.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
if err := k.setGrantQueueItem(ctx, expiration, granter, grantee, &authz.GrantQueueItem{
MsgTypeUrls: queueItems,
}); err != nil {
return err

View File

@ -26,6 +26,7 @@ var (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials.
const (
OpWeightMsgGrant = "op_weight_msg_grant"
OpWeightRevoke = "op_weight_msg_revoke"
@ -262,9 +263,9 @@ func SimulateMsgExec(ak authz.AccountKeeper, bk authz.BankKeeper, k keeper.Keepe
if err != nil {
if sdkerrors.ErrInsufficientFunds.Is(err) {
return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, err.Error()), nil, nil
} else {
return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, err.Error()), nil, err
}
return simtypes.NoOpMsg(authz.ModuleName, TypeMsgExec, err.Error()), nil, err
}
msgExec := authz.NewMsgExec(granteeAddr, msg)

View File

@ -17,6 +17,7 @@ import (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials.
const (
OpWeightMsgSend = "op_weight_msg_send"
OpWeightMsgMultiSend = "op_weight_msg_multisend"

View File

@ -7,7 +7,7 @@ import (
paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)
// key for constant fee parameter
// ParamStoreKeyConstantFee is the constant fee parameter
var ParamStoreKeyConstantFee = []byte("ConstantFee")
// type declaration for parameters

View File

@ -16,6 +16,7 @@ import (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials.
const (
OpWeightMsgSetWithdrawAddress = "op_weight_msg_set_withdraw_address"
OpWeightMsgWithdrawDelegationReward = "op_weight_msg_withdraw_delegation_reward"

View File

@ -781,7 +781,8 @@ func (s *IntegrationTestSuite) TestTxWithFeeGrant() {
from: grantee.String(),
flags: []string{
fmt.Sprintf("--%s=%s", flags.FlagFeePayer, grantee.String()),
fmt.Sprintf("--%s=%s", flags.FlagFeeGranter, granter.String())},
fmt.Sprintf("--%s=%s", flags.FlagFeeGranter, granter.String()),
},
},
}
@ -798,7 +799,6 @@ func (s *IntegrationTestSuite) TestTxWithFeeGrant() {
s.Require().Equal(tc.expErrCode, resp.Code, resp)
})
}
}
func (s *IntegrationTestSuite) TestFilteredFeeAllowance() {

View File

@ -14,6 +14,7 @@ import (
)
// Simulation operation weights constants
//nolint:gosec // These aren't harcoded credentials.
const (
OpWeightMsgGrantAllowance = "op_weight_msg_grant_fee_allowance"
OpWeightMsgRevokeAllowance = "op_weight_msg_grant_revoke_allowance"

View File

@ -18,6 +18,7 @@ var commonArgs = []string{
}
// MsgSubmitLegacyProposal creates a tx for submit legacy proposal
//nolint:staticcheck // we are intentionally using a deprecated flag here.
func MsgSubmitLegacyProposal(clientCtx client.Context, from, title, description, proposalType string, extraArgs ...string) (testutil.BufferWriter, error) {
args := append([]string{
fmt.Sprintf("--%s=%s", govcli.FlagTitle, title),

View File

@ -188,7 +188,7 @@ func (s *IntegrationTestSuite) TestNewCmdSubmitLegacyProposal() {
{
"invalid proposal (file)",
[]string{
fmt.Sprintf("--%s=%s", cli.FlagProposal, invalidPropFile.Name()),
fmt.Sprintf("--%s=%s", cli.FlagProposal, invalidPropFile.Name()), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
@ -198,8 +198,8 @@ func (s *IntegrationTestSuite) TestNewCmdSubmitLegacyProposal() {
{
"invalid proposal",
[]string{
fmt.Sprintf("--%s='Where is the title!?'", cli.FlagDescription),
fmt.Sprintf("--%s=%s", cli.FlagProposalType, v1beta1.ProposalTypeText),
fmt.Sprintf("--%s='Where is the title!?'", cli.FlagDescription), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s=%s", cli.FlagProposalType, v1beta1.ProposalTypeText), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s=%s", cli.FlagDeposit, sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(5431)).String()),
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
@ -209,6 +209,7 @@ func (s *IntegrationTestSuite) TestNewCmdSubmitLegacyProposal() {
},
{
"valid transaction (file)",
//nolint:staticcheck // we are intentionally using a deprecated flag here.
[]string{
fmt.Sprintf("--%s=%s", cli.FlagProposal, validPropFile.Name()),
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
@ -221,9 +222,9 @@ func (s *IntegrationTestSuite) TestNewCmdSubmitLegacyProposal() {
{
"valid transaction",
[]string{
fmt.Sprintf("--%s='Text Proposal'", cli.FlagTitle),
fmt.Sprintf("--%s='Where is the title!?'", cli.FlagDescription),
fmt.Sprintf("--%s=%s", cli.FlagProposalType, v1beta1.ProposalTypeText),
fmt.Sprintf("--%s='Text Proposal'", cli.FlagTitle), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s='Where is the title!?'", cli.FlagDescription), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s=%s", cli.FlagProposalType, v1beta1.ProposalTypeText), //nolint:staticcheck // we are intentionally using a deprecated flag here.
fmt.Sprintf("--%s=%s", cli.FlagDeposit, sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(5431)).String()),
fmt.Sprintf("--%s=%s", flags.FlagFrom, val.Address.String()),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),

View File

@ -199,8 +199,8 @@ func (keeper Keeper) InactiveProposalQueueIterator(ctx sdk.Context, endTime time
// assertMetadataLength returns an error if given metadata length
// is greater than a pre-defined maxMetadataLen.
func (k Keeper) assertMetadataLength(metadata string) error {
if metadata != "" && uint64(len(metadata)) > k.config.MaxMetadataLen {
func (keeper Keeper) assertMetadataLength(metadata string) error {
if metadata != "" && uint64(len(metadata)) > keeper.config.MaxMetadataLen {
return types.ErrMetadataTooLong.Wrapf("got metadata with length %d", len(metadata))
}
return nil

View File

@ -28,6 +28,7 @@ var (
)
// Simulation operation weights constants
//nolint:gosec // these are not hard-coded credentials.
const (
OpWeightMsgDeposit = "op_weight_msg_deposit"
OpWeightMsgVote = "op_weight_msg_vote"
@ -436,7 +437,7 @@ func randomProposalID(r *rand.Rand, k keeper.Keeper,
}
proposal, ok := k.GetProposal(ctx, proposalID)
if !ok || v1.ProposalStatus(proposal.Status) != status {
if !ok || proposal.Status != status {
return proposalID, false
}

View File

@ -256,7 +256,7 @@ func (c MsgExecLegacyContent) ValidateBasic() error {
}
// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces
func (m MsgExecLegacyContent) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
func (c MsgExecLegacyContent) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error {
var content v1beta1.Content
return unpacker.UnpackAny(m.Content, &content)
return unpacker.UnpackAny(c.Content, &content)
}

View File

@ -1,7 +1,6 @@
package v1beta1
import (
"fmt"
"time"
"sigs.k8s.io/yaml"
@ -49,22 +48,6 @@ func (dp DepositParams) Equal(dp2 DepositParams) bool {
return dp.MinDeposit.IsEqual(dp2.MinDeposit) && dp.MaxDepositPeriod == dp2.MaxDepositPeriod
}
func validateDepositParams(i interface{}) error {
v, ok := i.(DepositParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)
}
if !v.MinDeposit.IsValid() {
return fmt.Errorf("invalid minimum deposit: %s", v.MinDeposit)
}
if v.MaxDepositPeriod <= 0 {
return fmt.Errorf("maximum deposit period must be positive: %d", v.MaxDepositPeriod)
}
return nil
}
// NewTallyParams creates a new TallyParams object
func NewTallyParams(quorum, threshold, vetoThreshold sdk.Dec) TallyParams {
return TallyParams{
@ -90,34 +73,6 @@ func (tp TallyParams) String() string {
return string(out)
}
func validateTallyParams(i interface{}) error {
v, ok := i.(TallyParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)
}
if v.Quorum.IsNegative() {
return fmt.Errorf("quorom cannot be negative: %s", v.Quorum)
}
if v.Quorum.GT(sdk.OneDec()) {
return fmt.Errorf("quorom too large: %s", v)
}
if !v.Threshold.IsPositive() {
return fmt.Errorf("vote threshold must be positive: %s", v.Threshold)
}
if v.Threshold.GT(sdk.OneDec()) {
return fmt.Errorf("vote threshold too large: %s", v)
}
if !v.VetoThreshold.IsPositive() {
return fmt.Errorf("veto threshold must be positive: %s", v.Threshold)
}
if v.VetoThreshold.GT(sdk.OneDec()) {
return fmt.Errorf("veto threshold too large: %s", v)
}
return nil
}
// NewVotingParams creates a new VotingParams object
func NewVotingParams(votingPeriod time.Duration) VotingParams {
return VotingParams{
@ -141,19 +96,6 @@ func (vp VotingParams) String() string {
return string(out)
}
func validateVotingParams(i interface{}) error {
v, ok := i.(VotingParams)
if !ok {
return fmt.Errorf("invalid parameter type: %T", i)
}
if v.VotingPeriod <= 0 {
return fmt.Errorf("voting period must be positive: %s", v.VotingPeriod)
}
return nil
}
// Params returns all of the governance params
type Params struct {
VotingParams VotingParams `json:"voting_params" yaml:"voting_params"`

View File

@ -40,7 +40,7 @@ func execFromString(execStr string) group.Exec {
}
// CLIProposal defines a Msg-based group proposal for CLI purposes.
type CLIProposal struct {
type Proposal struct {
GroupPolicyAddress string `json:"group_policy_address"`
// Messages defines an array of sdk.Msgs proto-JSON-encoded as Anys.
Messages []json.RawMessage `json:"messages"`
@ -48,25 +48,25 @@ type CLIProposal struct {
Proposers []string `json:"proposers"`
}
func getCLIProposal(path string) (CLIProposal, error) {
func getCLIProposal(path string) (Proposal, error) {
contents, err := os.ReadFile(path)
if err != nil {
return CLIProposal{}, err
return Proposal{}, err
}
return parseCLIProposal(contents)
}
func parseCLIProposal(contents []byte) (CLIProposal, error) {
var p CLIProposal
func parseCLIProposal(contents []byte) (Proposal, error) {
var p Proposal
if err := json.Unmarshal(contents, &p); err != nil {
return CLIProposal{}, err
return Proposal{}, err
}
return p, nil
}
func parseMsgs(cdc codec.Codec, p CLIProposal) ([]sdk.Msg, error) {
func parseMsgs(cdc codec.Codec, p Proposal) ([]sdk.Msg, error) {
msgs := make([]sdk.Msg, len(p.Messages))
for i, anyJSON := range p.Messages {
var msg sdk.Msg

View File

@ -2446,7 +2446,7 @@ func (s *IntegrationTestSuite) createCLIProposal(groupPolicyAddress, proposer, s
msgJSON, err := s.cfg.Codec.MarshalInterfaceJSON(&msg)
s.Require().NoError(err)
p := client.CLIProposal{
p := client.Proposal{
GroupPolicyAddress: groupPolicyAddress,
Messages: []json.RawMessage{msgJSON},
Metadata: metadata,

View File

@ -5,7 +5,6 @@ import (
"github.com/cosmos/cosmos-sdk/store"
"github.com/cosmos/cosmos-sdk/store/gaskv"
"github.com/cosmos/cosmos-sdk/store/types"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
sdk "github.com/cosmos/cosmos-sdk/types"
dbm "github.com/tendermint/tm-db"
@ -13,7 +12,7 @@ import (
type MockContext struct {
db *dbm.MemDB
store types.CommitMultiStore
store storetypes.CommitMultiStore
}
func NewMockContext() *MockContext {
@ -36,18 +35,18 @@ func (m MockContext) KVStore(key storetypes.StoreKey) sdk.KVStore {
}
type debuggingGasMeter struct {
g types.GasMeter
g storetypes.GasMeter
}
func (d debuggingGasMeter) GasConsumed() types.Gas {
func (d debuggingGasMeter) GasConsumed() storetypes.Gas {
return d.g.GasConsumed()
}
func (d debuggingGasMeter) GasRemaining() types.Gas {
func (d debuggingGasMeter) GasRemaining() storetypes.Gas {
return d.g.GasRemaining()
}
func (d debuggingGasMeter) GasConsumedToLimit() types.Gas {
func (d debuggingGasMeter) GasConsumedToLimit() storetypes.Gas {
return d.g.GasConsumedToLimit()
}
@ -55,11 +54,11 @@ func (d debuggingGasMeter) RefundGas(amount uint64, descriptor string) {
d.g.RefundGas(amount, descriptor)
}
func (d debuggingGasMeter) Limit() types.Gas {
func (d debuggingGasMeter) Limit() storetypes.Gas {
return d.g.Limit()
}
func (d debuggingGasMeter) ConsumeGas(amount types.Gas, descriptor string) {
func (d debuggingGasMeter) ConsumeGas(amount storetypes.Gas, descriptor string) {
fmt.Printf("++ Consuming gas: %q :%d\n", descriptor, amount)
d.g.ConsumeGas(amount, descriptor)
}
@ -87,14 +86,14 @@ func NewGasCountingMockContext() *GasCountingMockContext {
}
func (g GasCountingMockContext) KVStore(store sdk.KVStore) sdk.KVStore {
return gaskv.NewStore(store, g.GasMeter, types.KVGasConfig())
return gaskv.NewStore(store, g.GasMeter, storetypes.KVGasConfig())
}
func (g GasCountingMockContext) GasConsumed() types.Gas {
func (g GasCountingMockContext) GasConsumed() storetypes.Gas {
return g.GasMeter.GasConsumed()
}
func (g GasCountingMockContext) GasRemaining() types.Gas {
func (g GasCountingMockContext) GasRemaining() storetypes.Gas {
return g.GasMeter.GasRemaining()
}

View File

@ -42,6 +42,7 @@ var (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials.
const (
OpMsgCreateGroup = "op_weight_msg_create_group"
OpMsgUpdateGroupAdmin = "op_weight_msg_update_group_admin"
@ -432,7 +433,7 @@ func SimulateMsgSubmitProposal(ak group.AccountKeeper, bk group.BankKeeper, k ke
// Pick a random member from the group
ctx := sdk.WrapSDKContext(sdkCtx)
acc, account, err := randomMember(r, k, ak, ctx, accounts, groupID)
acc, account, err := randomMember(ctx, r, k, ak, accounts, groupID)
if err != nil {
return simtypes.NoOpMsg(group.ModuleName, TypeMsgSubmitProposal, ""), nil, err
}
@ -962,7 +963,7 @@ func SimulateMsgVote(ak group.AccountKeeper,
// Pick a random member from the group
ctx := sdk.WrapSDKContext(sdkCtx)
acc, account, err := randomMember(r, k, ak, ctx, accounts, g.Id)
acc, account, err := randomMember(ctx, r, k, ak, accounts, g.Id)
if err != nil {
return simtypes.NoOpMsg(group.ModuleName, TypeMsgVote, ""), nil, err
}
@ -1139,7 +1140,7 @@ func SimulateMsgLeaveGroup(k keeper.Keeper, ak group.AccountKeeper, bk group.Ban
}
// Pick a random member from the group
acc, account, err := randomMember(r, k, ak, ctx, accounts, groupInfo.Id)
acc, account, err := randomMember(ctx, r, k, ak, accounts, groupInfo.Id)
if err != nil {
return simtypes.NoOpMsg(group.ModuleName, TypeMsgLeaveGroup, ""), nil, err
}
@ -1252,8 +1253,8 @@ func randomGroupPolicy(r *rand.Rand, k keeper.Keeper, ak group.AccountKeeper,
return groupInfo, groupPolicyInfo, acc, account, nil
}
func randomMember(r *rand.Rand, k keeper.Keeper, ak group.AccountKeeper,
ctx context.Context, accounts []simtypes.Account, groupID uint64,
func randomMember(ctx context.Context, r *rand.Rand, k keeper.Keeper, ak group.AccountKeeper,
accounts []simtypes.Account, groupID uint64,
) (acc simtypes.Account, account authtypes.AccountI, err error) {
res, err := k.GroupMembers(ctx, &group.QueryGroupMembersRequest{
GroupId: groupID,
@ -1280,9 +1281,8 @@ func randIntInRange(r *rand.Rand, l int) int {
}
if l == 1 {
return 0
} else {
return simtypes.RandIntBetween(r, 0, l-1)
}
return simtypes.RandIntBetween(r, 0, l-1)
}
func findAccount(accounts []simtypes.Account, addr string) (idx int) {

View File

@ -368,33 +368,33 @@ func MemberToMemberRequest(m *Member) MemberRequest {
}
}
func (p Proposal) ValidateBasic() error {
if p.Id == 0 {
func (g Proposal) ValidateBasic() error {
if g.Id == 0 {
return sdkerrors.Wrap(errors.ErrEmpty, "proposal id")
}
_, err := sdk.AccAddressFromBech32(p.GroupPolicyAddress)
_, err := sdk.AccAddressFromBech32(g.GroupPolicyAddress)
if err != nil {
return sdkerrors.Wrap(err, "proposal group policy address")
}
if p.GroupVersion == 0 {
if g.GroupVersion == 0 {
return sdkerrors.Wrap(errors.ErrEmpty, "proposal group version")
}
if p.GroupPolicyVersion == 0 {
if g.GroupPolicyVersion == 0 {
return sdkerrors.Wrap(errors.ErrEmpty, "proposal group policy version")
}
_, err = p.FinalTallyResult.GetYesCount()
_, err = g.FinalTallyResult.GetYesCount()
if err != nil {
return sdkerrors.Wrap(err, "proposal FinalTallyResult yes count")
}
_, err = p.FinalTallyResult.GetNoCount()
_, err = g.FinalTallyResult.GetNoCount()
if err != nil {
return sdkerrors.Wrap(err, "proposal FinalTallyResult no count")
}
_, err = p.FinalTallyResult.GetAbstainCount()
_, err = g.FinalTallyResult.GetAbstainCount()
if err != nil {
return sdkerrors.Wrap(err, "proposal FinalTallyResult abstain count")
}
_, err = p.FinalTallyResult.GetNoWithVetoCount()
_, err = g.FinalTallyResult.GetNoWithVetoCount()
if err != nil {
return sdkerrors.Wrap(err, "proposal FinalTallyResult veto count")
}

View File

@ -17,6 +17,7 @@ import (
"github.com/cosmos/cosmos-sdk/x/simulation"
)
//nolint:gosec // these are not hardcoded credentials.
const (
// OpWeightMsgSend Simulation operation weights constants
OpWeightMsgSend = "op_weight_msg_send"

View File

@ -335,7 +335,7 @@ func runQueuedOperations(queueOps map[int][]simulation.Operation,
numOpsRan = len(queuedOp)
for i := 0; i < numOpsRan; i++ {
opMsg, futureOps, err := queuedOp[i](r, app, ctx, accounts, chainID)
if futureOps != nil && len(futureOps) > 0 {
if len(futureOps) > 0 {
allFutureOps = append(allFutureOps, futureOps...)
}
@ -379,7 +379,7 @@ func runQueuedTimeOperations(queueOps []simulation.FutureOperation,
tb.FailNow()
}
if futureOps != nil && len(futureOps) > 0 {
if len(futureOps) > 0 {
allFutureOps = append(allFutureOps, futureOps...)
}

View File

@ -17,6 +17,7 @@ import (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials.
const (
OpWeightMsgUnjail = "op_weight_msg_unjail"
)

View File

@ -3,6 +3,8 @@ package keeper
import (
"fmt"
"cosmossdk.io/math"
storetypes "github.com/cosmos/cosmos-sdk/store/types"
"github.com/tendermint/tendermint/libs/log"
@ -72,7 +74,7 @@ func (k *Keeper) SetHooks(sh types.StakingHooks) {
}
// Load the last total validator power.
func (k Keeper) GetLastTotalPower(ctx sdk.Context) sdk.Int {
func (k Keeper) GetLastTotalPower(ctx sdk.Context) math.Int {
store := ctx.KVStore(k.storeKey)
bz := store.Get(types.LastTotalPowerKey)
@ -87,7 +89,7 @@ func (k Keeper) GetLastTotalPower(ctx sdk.Context) sdk.Int {
}
// Set the last total validator power.
func (k Keeper) SetLastTotalPower(ctx sdk.Context, power sdk.Int) {
func (k Keeper) SetLastTotalPower(ctx sdk.Context, power math.Int) {
store := ctx.KVStore(k.storeKey)
bz := k.cdc.MustMarshal(&sdk.IntProto{Int: power})
store.Set(types.LastTotalPowerKey, bz)

View File

@ -15,6 +15,7 @@ import (
)
// Simulation operation weights constants
//nolint:gosec // these are not hardcoded credentials
const (
OpWeightMsgCreateValidator = "op_weight_msg_create_validator"
OpWeightMsgEditValidator = "op_weight_msg_edit_validator"

View File

@ -8,12 +8,12 @@ import (
)
func parseArgsToContent(fs *pflag.FlagSet, name string) (gov.Content, error) {
title, err := fs.GetString(cli.FlagTitle)
title, err := fs.GetString(cli.FlagTitle) //nolint:staticcheck // we are intentionally using a deprecated flag here.
if err != nil {
return nil, err
}
description, err := fs.GetString(cli.FlagDescription)
description, err := fs.GetString(cli.FlagDescription) //nolint:staticcheck // we are intentionally using a deprecated flag here.
if err != nil {
return nil, err
}

View File

@ -59,7 +59,7 @@ func NewCmdSubmitLegacyUpgradeProposal() *cobra.Command {
return err
}
if !noValidate {
prop := content.(*types.SoftwareUpgradeProposal)
prop := content.(*types.SoftwareUpgradeProposal) //nolint:staticcheck // we are intentionally using a deprecated proposal type.
var daemonName string
if daemonName, err = cmd.Flags().GetString(FlagDaemonName); err != nil {
return err
@ -93,8 +93,8 @@ func NewCmdSubmitLegacyUpgradeProposal() *cobra.Command {
},
}
cmd.Flags().String(cli.FlagTitle, "", "title of proposal")
cmd.Flags().String(cli.FlagDescription, "", "description of proposal")
cmd.Flags().String(cli.FlagTitle, "", "title of proposal") //nolint:staticcheck // we are intentionally using a deprecated flag here.
cmd.Flags().String(cli.FlagDescription, "", "description of proposal") //nolint:staticcheck // we are intentionally using a deprecated flag here.
cmd.Flags().String(cli.FlagDeposit, "", "deposit of proposal")
cmd.Flags().Int64(FlagUpgradeHeight, 0, "The height at which the upgrade must happen")
cmd.Flags().String(FlagUpgradeInfo, "", "Info for the upgrade plan such as new version download urls, etc.")
@ -129,12 +129,12 @@ func NewCmdSubmitLegacyCancelUpgradeProposal() *cobra.Command {
return err
}
title, err := cmd.Flags().GetString(cli.FlagTitle)
title, err := cmd.Flags().GetString(cli.FlagTitle) //nolint:staticcheck // we are intentionally using a deprecated flag here.
if err != nil {
return err
}
description, err := cmd.Flags().GetString(cli.FlagDescription)
description, err := cmd.Flags().GetString(cli.FlagDescription) //nolint:staticcheck // we are intentionally using a deprecated flag here.
if err != nil {
return err
}
@ -150,11 +150,11 @@ func NewCmdSubmitLegacyCancelUpgradeProposal() *cobra.Command {
},
}
cmd.Flags().String(cli.FlagTitle, "", "title of proposal")
cmd.Flags().String(cli.FlagDescription, "", "description of proposal")
cmd.Flags().String(cli.FlagTitle, "", "title of proposal") //nolint:staticcheck // we are intentionally using a deprecated flag here.
cmd.Flags().String(cli.FlagDescription, "", "description of proposal") //nolint:staticcheck // we are intentionally using a deprecated flag here.
cmd.Flags().String(cli.FlagDeposit, "", "deposit of proposal")
cmd.MarkFlagRequired(cli.FlagTitle)
cmd.MarkFlagRequired(cli.FlagDescription)
cmd.MarkFlagRequired(cli.FlagTitle) //nolint:staticcheck // we are intentionally using a deprecated flag here.
cmd.MarkFlagRequired(cli.FlagDescription) //nolint:staticcheck // we are intentionally using a deprecated flag here.
return cmd
}

View File

@ -11,6 +11,7 @@ import (
// NewSoftwareUpgradeProposalHandler creates a governance handler to manage new proposal types.
// It enables SoftwareUpgradeProposal to propose an Upgrade, and CancelSoftwareUpgradeProposal
// to abort a previously voted upgrade.
//nolint: staticcheck // we are intentionally using a deprecated proposal here.
func NewSoftwareUpgradeProposalHandler(k keeper.Keeper) govtypes.Handler {
return func(ctx sdk.Context, content govtypes.Content) error {
switch c := content.(type) {
@ -26,10 +27,12 @@ func NewSoftwareUpgradeProposalHandler(k keeper.Keeper) govtypes.Handler {
}
}
// handleSoftwareUpgradeProposal schedules the upgrade described by the
// proposal's Plan via the upgrade keeper, returning any scheduling error.
//
//nolint:staticcheck // we are intentionally using a deprecated proposal here.
func handleSoftwareUpgradeProposal(ctx sdk.Context, k keeper.Keeper, p *types.SoftwareUpgradeProposal) error {
	return k.ScheduleUpgrade(ctx, p.Plan)
}
//nolint:staticcheck // we are intentionally using a deprecated proposal here.
func handleCancelSoftwareUpgradeProposal(ctx sdk.Context, k keeper.Keeper, _ *types.CancelSoftwareUpgradeProposal) error {
k.ClearUpgradePlan(ctx)
return nil