// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package ethdb defines the interfaces for an Ethereum data store.
package ethdb

import "io"

// KeyValueReader wraps the Has and Get methods of a backing data store.
type KeyValueReader interface {
	// Has reports whether a key is present in the key-value data store.
	Has(key []byte) (bool, error)

	// Get retrieves the value of the given key if it's present in the key-value data store.
	Get(key []byte) ([]byte, error)
}

// KeyValueWriter wraps the Put and Delete methods of a backing data store.
type KeyValueWriter interface {
	// Put inserts the given value into the key-value data store.
	Put(key []byte, value []byte) error

	// Delete removes the key from the key-value data store.
	Delete(key []byte) error
}
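
// Illustrative sketch, not part of the upstream file: a helper that copies a
// single key from one key-value store to another. The function name and its
// behaviour are assumptions made purely to show how the Has/Get/Put methods
// declared above are meant to be combined.
func copyKey(key []byte, src KeyValueReader, dst KeyValueWriter) error {
	// Skip keys that are not present in the source store.
	if ok, err := src.Has(key); err != nil || !ok {
		return err
	}
	val, err := src.Get(key)
	if err != nil {
		return err
	}
	// Write the retrieved value into the destination store.
	return dst.Put(key, val)
}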

// Stater wraps the Stat method of a backing data store.
type Stater interface {
	// Stat returns a particular internal stat of the database.
	Stat(property string) (string, error)
}

// Compacter wraps the Compact method of a backing data store.
type Compacter interface {
	// Compact flattens the underlying data store for the given key range. In essence,
	// deleted and overwritten versions are discarded, and the data is rearranged to
	// reduce the cost of operations needed to access them.
	//
	// A nil start is treated as a key before all keys in the data store; a nil limit
	// is treated as a key after all keys in the data store. If both are nil, the
	// entire data store will be compacted.
	Compact(start []byte, limit []byte) error
}
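
// Illustrative sketch, not part of the upstream file: compacting an entire
// data store by passing nil bounds, as described in the Compact comment above.
// The function name is hypothetical.
func compactAll(db Compacter) error {
	// A nil start and a nil limit together cover every key in the store.
	return db.Compact(nil, nil)
}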

// KeyValueStore contains all the methods required to allow handling different
// key-value data stores backing the high-level database.
type KeyValueStore interface {
	KeyValueReader
	KeyValueWriter
	Batcher
	Iteratee
	Stater
	Compacter
	Snapshotter
	io.Closer
}

// AncientReader contains the methods required to read from immutable ancient data.
type AncientReader interface {
	// HasAncient reports whether the specified data exists in the ancient store.
	HasAncient(kind string, number uint64) (bool, error)

	// Ancient retrieves an ancient binary blob from the append-only immutable files.
	Ancient(kind string, number uint64) ([]byte, error)

	// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
	// It will return
	//  - at most 'count' items,
	//  - at least 1 item (even if exceeding the maxBytes), but will otherwise
	//    return as many items as fit into maxBytes.
	AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error)

	// Ancients returns the ancient item numbers in the ancient store.
	Ancients() (uint64, error)

	// Tail returns the number of the first stored item in the freezer.
	// This number can also be interpreted as the total number of deleted items.
	Tail() (uint64, error)

	// AncientSize returns the ancient size of the specified category.
	AncientSize(kind string) (uint64, error)
}
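
// Illustrative sketch, not part of the upstream file: reading a window of
// consecutive ancient items of one kind. The kind string "headers", the byte
// limit and the function name are assumptions for illustration; callers must
// be prepared to receive fewer than 'count' items once maxBytes is exceeded.
func readAncientWindow(db AncientReader, start, count uint64) ([][]byte, error) {
	// Cap the response at roughly 1 MiB; at least one item is always returned.
	return db.AncientRange("headers", start, count, 1024*1024)
}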

// AncientBatchReader is the interface for 'batched' or 'atomic' reading.
type AncientBatchReader interface {
	AncientReader

	// ReadAncients runs the given read operation while ensuring that no writes take place
	// on the underlying freezer.
	ReadAncients(fn func(AncientReader) error) (err error)
}
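
// Illustrative sketch, not part of the upstream file: reading the tail and
// head counters in one consistent view, so that no freezer write can slip in
// between the two calls. The function and variable names are hypothetical.
func ancientBounds(db AncientBatchReader) (tail, head uint64, err error) {
	err = db.ReadAncients(func(reader AncientReader) error {
		// Both reads observe the same, write-free snapshot of the freezer.
		if tail, err = reader.Tail(); err != nil {
			return err
		}
		head, err = reader.Ancients()
		return err
	})
	return tail, head, err
}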

// AncientWriter contains the methods required to write to immutable ancient data.
type AncientWriter interface {
	// ModifyAncients runs a write operation on the ancient store.
	// If the function returns an error, any changes to the underlying store are reverted.
	// The integer return value is the total size of the written data.
	ModifyAncients(func(AncientWriteOp) error) (int64, error)

	// TruncateHead discards all but the first n ancient data from the ancient store.
	// After the truncation, the latest available item is item n-1 (counting from 0).
	TruncateHead(n uint64) error

	// TruncateTail discards the first n ancient data from the ancient store. Items that
	// have already been deleted are ignored. After the truncation, the earliest available
	// item is item n (counting from 0). The deleted items may not be removed from the
	// ancient store immediately; they are only purged once the accumulated deleted data
	// reaches a threshold.
	TruncateTail(n uint64) error

	// Sync flushes all in-memory ancient store data to disk.
	Sync() error

	// MigrateTable processes and migrates entries of a given table to a new format.
	// The second argument is a function that takes a raw entry and returns it
	// in the newest format.
	MigrateTable(string, func([]byte) ([]byte, error)) error
}
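
// Illustrative sketch, not part of the upstream file: atomically appending a
// batch of raw, already-encoded items to one table of the ancient store. The
// function name and parameters are assumptions; if the callback fails,
// ModifyAncients reverts every append made so far.
func appendRawItems(db AncientWriter, kind string, first uint64, items [][]byte) (int64, error) {
	return db.ModifyAncients(func(op AncientWriteOp) error {
		for i, item := range items {
			// Item numbers must be contiguous, so derive them from 'first'.
			if err := op.AppendRaw(kind, first+uint64(i), item); err != nil {
				return err
			}
		}
		return nil
	})
}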

// AncientWriteOp is given to the function argument of ModifyAncients.
type AncientWriteOp interface {
	// Append adds an RLP-encoded item.
	Append(kind string, number uint64, item interface{}) error

	// AppendRaw adds an item without RLP-encoding it.
	AppendRaw(kind string, number uint64, item []byte) error
}

// Reader contains the methods required to read data from both the key-value store
// and the immutable ancient data.
type Reader interface {
	KeyValueReader
	AncientBatchReader
}

// Writer contains the methods required to write data to both the key-value store
// and the immutable ancient data.
type Writer interface {
	KeyValueWriter
	AncientWriter
}

// AncientStore contains all the methods required to allow handling different
// ancient data stores backing the immutable chain data store.
type AncientStore interface {
	AncientBatchReader
	AncientWriter
	io.Closer
}

// Database contains all the methods required by the high-level database to not
// only access the key-value data store but also the chain freezer.
type Database interface {
	Reader
	Writer
	Batcher
	Iteratee
	Stater
	Compacter
	Snapshotter
	io.Closer
}
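
// Illustrative sketch, not part of the upstream file: a lookup that prefers
// the freezer and falls back to the key-value store, showing how the combined
// Database interface exposes both backends. The kind string, the key passed in
// and the function name are hypothetical.
func readWithFreezerFallback(db Database, kind string, number uint64, key []byte) ([]byte, error) {
	// Try the immutable ancient store first.
	if ok, err := db.HasAncient(kind, number); err == nil && ok {
		return db.Ancient(kind, number)
	}
	// Fall back to the mutable key-value store.
	return db.Get(key)
}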