cosmos-sdk/store/db/prefixdb.go
Julien Robert 13780bd6d5
feat(x/gov): add yes_quorum parameter (#19167)
Co-authored-by: Robert Zaremba <robert@zaremba.ch>
Co-authored-by: Facundo Medica <14063057+facundomedica@users.noreply.github.com>
2024-01-30 22:04:24 +00:00

349 lines
7.4 KiB
Go

package db
import (
"bytes"
"fmt"
"sync"
corestore "cosmossdk.io/core/store"
"cosmossdk.io/store/v2"
)
// PrefixDB wraps a namespace of another database as a logical database.
// Every key passed in is transparently prepended with prefix before it
// reaches the underlying RawDB.
type PrefixDB struct {
	mtx    sync.Mutex  // NOTE(review): only taken in Close — confirm other methods are intended to be lock-free
	prefix []byte      // namespace prepended to every key
	db     store.RawDB // backing database
}

// Compile-time check that PrefixDB satisfies store.RawDB.
var _ store.RawDB = (*PrefixDB)(nil)
// NewPrefixDB lets you namespace multiple RawDBs within a single RawDB.
func NewPrefixDB(db store.RawDB, prefix []byte) *PrefixDB {
	return &PrefixDB{db: db, prefix: prefix}
}
// Get implements RawDB.
// It rejects empty keys and otherwise delegates to the underlying
// database using the namespaced key.
func (pdb *PrefixDB) Get(key []byte) ([]byte, error) {
	if len(key) == 0 {
		return nil, store.ErrKeyEmpty
	}
	return pdb.db.Get(pdb.prefixed(key))
}
// Has implements RawDB.
// It rejects empty keys and otherwise delegates to the underlying
// database using the namespaced key.
func (pdb *PrefixDB) Has(key []byte) (bool, error) {
	if len(key) == 0 {
		return false, store.ErrKeyEmpty
	}
	return pdb.db.Has(pdb.prefixed(key))
}
// Iterator implements RawDB.
// The start/end bounds are interpreted within the prefixed namespace; the
// returned iterator yields keys with the prefix stripped.
func (pdb *PrefixDB) Iterator(start, end []byte) (corestore.Iterator, error) {
	// Non-nil but empty bounds are invalid; nil means unbounded.
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, store.ErrKeyEmpty
	}

	pstart := append(cp(pdb.prefix), start...)
	var pend []byte
	if end != nil {
		pend = append(cp(pdb.prefix), end...)
	} else {
		// Unbounded end: stop at the first key past the prefix range.
		pend = cpIncr(pdb.prefix)
	}

	itr, err := pdb.db.Iterator(pstart, pend)
	if err != nil {
		return nil, err
	}
	return newPrefixIterator(pdb.prefix, start, end, itr)
}
// ReverseIterator implements RawDB.
// The start/end bounds are interpreted within the prefixed namespace; the
// returned iterator yields keys with the prefix stripped, in reverse order.
func (pdb *PrefixDB) ReverseIterator(start, end []byte) (corestore.Iterator, error) {
	// Non-nil but empty bounds are invalid; nil means unbounded.
	if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) {
		return nil, store.ErrKeyEmpty
	}

	pstart := append(cp(pdb.prefix), start...)
	var pend []byte
	if end != nil {
		pend = append(cp(pdb.prefix), end...)
	} else {
		// Unbounded end: stop at the first key past the prefix range.
		pend = cpIncr(pdb.prefix)
	}

	ritr, err := pdb.db.ReverseIterator(pstart, pend)
	if err != nil {
		return nil, err
	}
	return newPrefixIterator(pdb.prefix, start, end, ritr)
}
// NewBatch implements RawDB.
// The returned batch prepends the prefix to every key written through it.
func (pdb *PrefixDB) NewBatch() store.RawBatch {
	return newPrefixBatch(pdb.prefix, pdb.db.NewBatch())
}
// NewBatchWithSize implements RawDB.
// Like NewBatch, but the size hint is forwarded to the underlying database.
func (pdb *PrefixDB) NewBatchWithSize(size int) store.RawBatch {
	return newPrefixBatch(pdb.prefix, pdb.db.NewBatchWithSize(size))
}
// Close implements RawDB.
// Note that this closes the underlying database itself, not just this
// prefixed view of it.
func (pdb *PrefixDB) Close() error {
	pdb.mtx.Lock()
	defer pdb.mtx.Unlock()
	return pdb.db.Close()
}
// Print implements RawDB.
// It dumps the prefix and every key/value pair in the namespace to stdout
// in hex, primarily for debugging.
func (pdb *PrefixDB) Print() error {
	fmt.Printf("prefix: %X\n", pdb.prefix)

	itr, err := pdb.Iterator(nil, nil)
	if err != nil {
		return err
	}
	defer itr.Close()

	for ; itr.Valid(); itr.Next() {
		fmt.Printf("[%X]:\t[%X]\n", itr.Key(), itr.Value())
	}
	return nil
}
// prefixed returns a fresh slice holding the store's prefix followed by key.
func (pdb *PrefixDB) prefixed(key []byte) []byte {
	out := make([]byte, 0, len(pdb.prefix)+len(key))
	out = append(out, pdb.prefix...)
	return append(out, key...)
}
// IteratePrefix is a convenience function for iterating over a key domain
// restricted by prefix.
func IteratePrefix(db store.RawDB, prefix []byte) (corestore.Iterator, error) {
	// nil bounds mean "iterate everything"; a non-empty prefix restricts
	// the domain to [prefix, prefix+1).
	var start, end []byte
	if len(prefix) != 0 {
		start = cp(prefix)
		end = cpIncr(prefix)
	}
	return db.Iterator(start, end)
}
// Strips prefix while iterating from Iterator.
type prefixDBIterator struct {
	prefix []byte             // namespace stripped from every yielded key
	start  []byte             // unprefixed domain start, as supplied by the caller
	end    []byte             // unprefixed domain end, as supplied by the caller
	source corestore.Iterator // underlying iterator over prefixed keys
	valid  bool               // false once iteration leaves the prefix range
	err    error              // set if the backend yields a key lacking the prefix
}

// Compile-time check that prefixDBIterator satisfies corestore.Iterator.
var _ corestore.Iterator = (*prefixDBIterator)(nil)
// newPrefixIterator wraps source so that iteration is confined to keys
// carrying prefix, which is stripped from the keys it yields.
func newPrefixIterator(prefix, start, end []byte, source corestore.Iterator) (*prefixDBIterator, error) {
	// Empty keys are not allowed, so if a key exists in the database that
	// exactly matches the prefix we need to skip it.
	if source.Valid() && bytes.Equal(source.Key(), prefix) {
		source.Next()
	}

	// The iterator starts valid only if the source is positioned on a key
	// within the prefix range.
	return &prefixDBIterator{
		prefix: prefix,
		start:  start,
		end:    end,
		source: source,
		valid:  source.Valid() && bytes.HasPrefix(source.Key(), prefix),
	}, nil
}
// Domain implements Iterator.
// It returns the unprefixed bounds this iterator was constructed with.
func (itr *prefixDBIterator) Domain() (start, end []byte) {
	return itr.start, itr.end
}
// Valid implements Iterator.
// It reports whether the iterator is positioned on a key in the prefix
// range; a backend key lacking the prefix records an error and invalidates
// the iterator.
func (itr *prefixDBIterator) Valid() bool {
	if !itr.valid || itr.err != nil || !itr.source.Valid() {
		return false
	}

	// Sanity-check that the backend honored the prefixed bounds.
	key := itr.source.Key()
	if !bytes.HasPrefix(key, itr.prefix) {
		itr.err = fmt.Errorf("received invalid key from backend: %x (expected prefix %x)",
			key, itr.prefix)
		return false
	}
	return true
}
// Next implements Iterator.
// It advances the underlying iterator, invalidating this one when the
// source leaves the prefix range, and skipping any key that exactly equals
// the prefix (empty keys are not allowed).
func (itr *prefixDBIterator) Next() {
	itr.assertIsValid()

	for {
		itr.source.Next()
		switch {
		case !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix):
			itr.valid = false
			return
		case bytes.Equal(itr.source.Key(), itr.prefix):
			// Key matches the prefix exactly (empty logical key): skip it.
			continue
		default:
			return
		}
	}
}
// Key implements Iterator.
// (Comment fixed: this previously said "Next" — a copy-paste error.)
// It returns the current key with the prefix stripped.
func (itr *prefixDBIterator) Key() []byte {
	itr.assertIsValid()
	key := itr.source.Key()
	return key[len(itr.prefix):] // we have checked the key in Valid()
}
// Value implements Iterator.
// Values are passed through from the underlying iterator unchanged.
func (itr *prefixDBIterator) Value() []byte {
	itr.assertIsValid()
	return itr.source.Value()
}
// Error implements Iterator.
// A backend error takes precedence over this iterator's own error.
func (itr *prefixDBIterator) Error() error {
	if srcErr := itr.source.Error(); srcErr != nil {
		return srcErr
	}
	return itr.err
}
// Close implements Iterator.
// It closes only the underlying source iterator.
func (itr *prefixDBIterator) Close() error {
	return itr.source.Close()
}
// assertIsValid panics if the iterator is not valid; callers must check
// Valid() before using Key/Value/Next, so reaching this panic is a
// programmer error.
func (itr *prefixDBIterator) assertIsValid() {
	if !itr.Valid() {
		panic("iterator is invalid")
	}
}
// prefixDBBatch wraps a RawBatch, namespacing every key written through it
// under prefix.
type prefixDBBatch struct {
	prefix []byte         // namespace prepended to every key
	source store.RawBatch // underlying batch
}

// Compile-time check that prefixDBBatch satisfies store.RawBatch.
var _ store.RawBatch = (*prefixDBBatch)(nil)
// newPrefixBatch wraps source so that all writes are namespaced under prefix.
func newPrefixBatch(prefix []byte, source store.RawBatch) prefixDBBatch {
	return prefixDBBatch{prefix: prefix, source: source}
}
// Set implements RawBatch.
// It rejects empty keys and nil values, then stages the write under the
// namespaced key.
func (pb prefixDBBatch) Set(key, value []byte) error {
	switch {
	case len(key) == 0:
		return store.ErrKeyEmpty
	case value == nil:
		return store.ErrValueNil
	}
	return pb.source.Set(append(cp(pb.prefix), key...), value)
}
// Delete implements RawBatch.
// It rejects empty keys, then stages the deletion under the namespaced key.
func (pb prefixDBBatch) Delete(key []byte) error {
	if len(key) == 0 {
		return store.ErrKeyEmpty
	}
	return pb.source.Delete(append(cp(pb.prefix), key...))
}
// Write implements RawBatch.
// It flushes the staged operations via the underlying batch.
func (pb prefixDBBatch) Write() error {
	return pb.source.Write()
}
// WriteSync implements RawBatch.
// It flushes the staged operations synchronously via the underlying batch.
func (pb prefixDBBatch) WriteSync() error {
	return pb.source.WriteSync()
}
// Close implements RawBatch.
// It closes only the underlying batch, not the database it belongs to.
func (pb prefixDBBatch) Close() error {
	return pb.source.Close()
}
// GetByteSize implements RawBatch.
// NOTE(review): this is the only method that guards against a nil source
// (a closed batch) — confirm whether the other methods should too.
func (pb prefixDBBatch) GetByteSize() (int, error) {
	if pb.source == nil {
		return 0, store.ErrBatchClosed
	}
	return pb.source.GetByteSize()
}
func cp(bz []byte) (ret []byte) {
ret = make([]byte, len(bz))
copy(ret, bz)
return ret
}
// Returns a slice of the same length (big endian)
// except incremented by one.
// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) (ret []byte) {
if len(bz) == 0 {
panic("cpIncr expects non-zero bz length")
}
ret = cp(bz)
for i := len(bz) - 1; i >= 0; i-- {
if ret[i] < byte(0xFF) {
ret[i]++
return
}
ret[i] = byte(0x00)
if i == 0 {
// Overflow
return nil
}
}
return nil
}