Merge pull request #58 from cerc-io/roy/v5-dev

Update to v5
Ian Norden 2023-04-09 12:16:03 -05:00 committed by GitHub
commit 05559e2551
6 changed files with 21 additions and 10 deletions

File 1 of 6

@@ -1,6 +1,6 @@
 ## ipfs-ethdb
-[![Go Report Card](https://goreportcard.com/badge/github.com/vulcanize/ipfs-ethdb)](https://goreportcard.com/report/github.com/vulcanize/ipfs-ethdb)
+[![Go Report Card](https://goreportcard.com/badge/github.com/cerc/ipfs-ethdb)](https://goreportcard.com/report/github.com/cerc-io/ipfs-ethdb)
 > go-ethereum ethdb interfaces for Ethereum state data stored in IPFS

@@ -11,7 +11,7 @@ interfacing with a state database. These interfaces are used to build higher-lev
 which are used to perform the bulk of state related needs.
 Ethereum data can be stored on IPFS, standard codecs for Etheruem data are defined in the [go-cid](https://github.com/ipfs/go-cid) library.
-Using our [statediffing geth client](https://github.com/vulcanize/go-ethereum/releases/tag/v1.9.11-statediff-0.0.2) it is feasible to extract every single
+Using our [statediffing geth client](https://github.com/cerc-io/go-ethereum/releases/tag/v1.9.11-statediff-0.0.2) it is feasible to extract every single
 state and storage node and publish it to IPFS.
 Geth stores state data in leveldb as key-value pairs between the keccak256 hash of the rlp-encoded object and the rlp-encoded object.

@@ -50,7 +50,7 @@ import (
 	"github.com/ipfs/go-ipfs/core"
 	"github.com/ipfs/go-ipfs/repo/fsrepo"
 	"github.com/jmoiron/sqlx"
-	"github.com/vulcanize/ipfs-ethdb/v5"
+	"github.com/cerc-io/ipfs-ethdb/v5"
 )

 func main() {
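The hunk above only touches the import path, so the README's usage example is cut off before anything is constructed. Below is a minimal sketch of how those imports could fit together; the `ipfsethdb.NewDatabase` constructor taking the node's blockservice is an assumption for illustration (only the go-ipfs node setup calls are standard), and error handling is elided.

```go
package main

import (
	"context"

	"github.com/ipfs/go-ipfs/core"
	"github.com/ipfs/go-ipfs/repo/fsrepo"

	ipfsethdb "github.com/cerc-io/ipfs-ethdb/v5"
)

func main() {
	// Locate and open the local IPFS repository, then start an offline node.
	ipfsPath, _ := fsrepo.BestKnownPath()
	ipfsRepo, _ := fsrepo.Open(ipfsPath)
	node, _ := core.NewNode(context.Background(), &core.BuildCfg{
		Online: false,
		Repo:   ipfsRepo,
	})

	// Assumption: the root package wraps the node's blockservice in an
	// ethdb.Database-compatible type that go-ethereum can read state through.
	database := ipfsethdb.NewDatabase(node.Blocks)
	_ = database
}
```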

File 2 of 6

@@ -1,6 +1,6 @@
 ## ipfs-ethdb
-IPFS has been [extended](https://github.com/vulcanize/go-ipfs/releases/tag/v0.4.22-alpha) to [use Postgres](https://github.com/vulcanize/go-ipfs-config/releases/tag/v0.0.8-alpha) as a backing [datastore](https://github.com/ipfs/go-ds-sql/tree/master/postgres).
+IPFS has been [extended](https://github.com/cerc-io/go-ipfs/releases/tag/v0.4.22-alpha) to [use Postgres](https://github.com/cerc-io/go-ipfs-config/releases/tag/v0.0.8-alpha) as a backing [datastore](https://github.com/ipfs/go-ds-sql/tree/master/postgres).
 Interfacing directly with the IPFS-backing Postgres database has some advantages over using the blockservice interface.
 Namely, batching of IPFS writes with other Postgres writes and avoiding lock contention on the ipfs repository (lockfile located at the `IPFS_PATH`).
 The downside is that we forgo the block-exchange capabilities of the blockservice, and are only able to fetch data contained in the local datastore.

@@ -18,7 +18,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/jmoiron/sqlx"
-	"github.com/vulcanize/ipfs-ethdb/v5/postgres"
+	"github.com/cerc-io/ipfs-ethdb/v5/postgres/v1"
 )

 func main() {

@@ -31,7 +31,11 @@ func main() {
 	trieNodeIterator := t.NodeIterator([]byte{})
 	// do stuff with trie node iterator

-	database := pgipfsethdb.NewDatabase(db)
+	database := pgipfsethdb.NewDatabase(db, pgipfsethdb.CacheConfig{
+		Name:           "db",
+		Size:           3000000, // 3MB
+		ExpiryDuration: time.Hour,
+	})
 	stateDatabase := state.NewDatabase(database)
 	stateDB, _ := state.New(common.Hash{}, stateDatabase, nil)
 	stateDBNodeIterator := state.NewNodeIterator(stateDB)
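Taken together with the import hunk above, the v5 usage reads roughly as the self-contained sketch below. The sqlx connection string and the `lib/pq` driver import are assumptions added so the example compiles; the `CacheConfig` values are the ones introduced by this diff.

```go
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver

	pgipfsethdb "github.com/cerc-io/ipfs-ethdb/v5/postgres/v1"
)

func main() {
	// Assumed DSN; only host/user/password mirror the test config in this commit.
	db, _ := sqlx.Connect("postgres", "postgres://vdbm:password@localhost:5432/cerc_testing?sslmode=disable")

	// v5 requires an explicit cache configuration alongside the sqlx handle.
	database := pgipfsethdb.NewDatabase(db, pgipfsethdb.CacheConfig{
		Name:           "db",
		Size:           3000000, // 3MB
		ExpiryDuration: time.Hour,
	})

	// Wrap the ethdb.Database for go-ethereum and walk the state nodes.
	stateDatabase := state.NewDatabase(database)
	stateDB, _ := state.New(common.Hash{}, stateDatabase, nil)
	it := state.NewNodeIterator(stateDB)
	for it.Next() {
		// do stuff with each state node
	}
}
```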

File 3 of 6

@@ -16,7 +16,9 @@
 package shared

-import "github.com/jmoiron/sqlx"
+import (
+	"github.com/jmoiron/sqlx"
+)

 /*
 	Hostname: "localhost",

@@ -25,7 +27,6 @@ import "github.com/jmoiron/sqlx"
 	Username: "vdbm",
 	Password: "password",
 */
 // TestDB connect to the testing database
 // it assumes the database has the IPFS ipld.blocks table present
 // DO NOT use a production db for the test db, as it will remove all contents of the ipld.blocks table
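The commented-out block above documents the credentials the test helper expects. For orientation, connecting to that database with sqlx could look like the hypothetical helper below; the DSN layout, database name, and the `newTestDB` name itself are illustrative assumptions, not part of this commit.

```go
package shared

import (
	"fmt"

	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver
)

// newTestDB is a hypothetical stand-in for TestDB: it connects to the local
// testing database documented above (localhost / vdbm / password). The
// database name is assumed for illustration only.
func newTestDB() (*sqlx.DB, error) {
	dsn := fmt.Sprintf("postgres://%s:%s@%s:5432/%s?sslmode=disable",
		"vdbm", "password", "localhost", "cerc_testing")
	return sqlx.Connect("postgres", dsn)
}
```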

File 4 of 6

@@ -119,7 +119,7 @@ func (d *Database) dbGet(key string) ([]byte, error) {
 	var data []byte
 	err := d.db.Get(&data, getPgStr, key)
 	if err == sql.ErrNoRows {
-		log.Warn("Database miss for key", key)
+		log.Warn("Database miss for key ", key)
 	}
 	return data, err
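The only change in this hunk is a trailing space in the log message. As a side note, go-ethereum's log package treats the arguments after the message as key/value context pairs, so a structured variant (not what this commit does) would look like the sketch below; the helper name and package name are assumed.

```go
package pgipfsethdb // package name assumed for illustration

import (
	"database/sql"

	"github.com/ethereum/go-ethereum/log"
)

// warnMiss shows the structured form: log.Warn takes a message followed by
// alternating key/value context, so the missing key is attached as context
// rather than concatenated into the message string.
func warnMiss(key string, err error) {
	if err == sql.ErrNoRows {
		log.Warn("Database miss for key", "key", key)
	}
}
```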

File 5 of 6

@@ -40,6 +40,12 @@ var (
 	putPgStr = "INSERT INTO ipld.blocks (key, data, block_number) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING"
 	deletePgStr = "DELETE FROM ipld.blocks WHERE key = $1"
 	dbSizePgStr = "SELECT pg_database_size(current_database())"
+
+	DefaultCacheConfig = CacheConfig{
+		Name:           "db",
+		Size:           3000000, // 3MB
+		ExpiryDuration: time.Hour,
+	}
 )

 var _ ethdb.Database = &Database{}
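With `DefaultCacheConfig` now exported next to the SQL statements, callers that don't need custom cache settings can presumably pass it straight to the constructor from the README hunk; a minimal sketch, with the sqlx connection details assumed:

```go
package main

import (
	"github.com/jmoiron/sqlx"
	_ "github.com/lib/pq" // assumed Postgres driver

	pgipfsethdb "github.com/cerc-io/ipfs-ethdb/v5/postgres/v1"
)

func main() {
	// Assumed DSN for the IPFS-backing Postgres database.
	db, err := sqlx.Connect("postgres", "postgres://vdbm:password@localhost:5432/cerc_testing?sslmode=disable")
	if err != nil {
		panic(err)
	}

	// Reuse the exported default instead of spelling out a CacheConfig by hand.
	database := pgipfsethdb.NewDatabase(db, pgipfsethdb.DefaultCacheConfig)
	_ = database
}
```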

File 6 of 6

@@ -29,7 +29,7 @@ func Keccak256ToCid(h []byte, codec uint64) (cid.Cid, error) {
 	if err != nil {
 		return cid.Cid{}, err
 	}
-	return cid.NewCidV1(codec, multihash.Multihash(buf)), nil
+	return cid.NewCidV1(codec, buf), nil
 }

 // NewBlock takes a keccak256 hash key and the rlp []byte value it was derived from and creates an ipfs block object
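For context on this last hunk: the explicit `multihash.Multihash(buf)` conversion is dropped, which compiles either way because a plain `[]byte` is assignable to the named multihash type. A hedged, self-contained sketch of the idea behind `Keccak256ToCid`, assuming go-multihash's `Encode` helper and go-cid's Ethereum codec constants:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// keccak256ToCid is an illustrative re-implementation of the idea in the hunk
// above: encode a raw keccak256 digest as a multihash, then wrap it in a CIDv1
// under the supplied codec.
func keccak256ToCid(h []byte, codec uint64) (cid.Cid, error) {
	buf, err := mh.Encode(h, mh.KECCAK_256)
	if err != nil {
		return cid.Cid{}, err
	}
	return cid.NewCidV1(codec, buf), nil
}

func main() {
	nodeHash := crypto.Keccak256([]byte("rlp-encoded trie node"))
	c, _ := keccak256ToCid(nodeHash, cid.EthStateTrie)
	fmt.Println(c)
}
```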