// VulcanizeDB
// Copyright © 2020 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package pgipfsethdb

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/jmoiron/sqlx"
	"github.com/mailgun/groupcache/v2"
)

var errNotSupported = errors.New("this operation is not supported")

var (
	hasPgStr    = "SELECT exists(select 1 from public.blocks WHERE key = $1)"
	getPgStr    = "SELECT data FROM public.blocks WHERE key = $1"
	putPgStr    = "INSERT INTO public.blocks (key, data) VALUES ($1, $2) ON CONFLICT (key) DO NOTHING"
	deletePgStr = "DELETE FROM public.blocks WHERE key = $1"
	dbSizePgStr = "SELECT pg_database_size(current_database())"
)

// Database is the type that satisfies the ethdb.Database and ethdb.KeyValueStore interfaces for PG-IPFS Ethereum data using a direct Postgres connection
type Database struct {
	db    *sqlx.DB
	cache *groupcache.Group
}

// ModifyAncients satisfies the ethdb.AncientWriter interface
// ModifyAncients is not supported by this database
func (d *Database) ModifyAncients(f func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// CacheConfig holds the configuration parameters for the groupcache
type CacheConfig struct {
	Name           string
	Size           int // maximum cache size, in bytes
	ExpiryDuration time.Duration
}

// NewKeyValueStore returns an ethdb.KeyValueStore interface for PG-IPFS
func NewKeyValueStore(db *sqlx.DB, cacheConfig CacheConfig) ethdb.KeyValueStore {
	database := Database{db: db}
	database.InitCache(cacheConfig)

	return &database
}

// NewDatabase returns an ethdb.Database interface for PG-IPFS
func NewDatabase(db *sqlx.DB, cacheConfig CacheConfig) *Database {
	database := Database{db: db}
	database.InitCache(cacheConfig)

	return &database
}
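
// A minimal usage sketch (illustrative; the connection string, cache name, size,
// and expiry below are assumptions, not defaults, and the caller must import a
// Postgres driver such as github.com/lib/pq for sqlx.Connect to work):
//
//	db, err := sqlx.Connect("postgres", "postgres://localhost:5432/vulcanize_public?sslmode=disable")
//	if err != nil {
//		// handle connection error
//	}
//	store := NewDatabase(db, CacheConfig{
//		Name:           "pgipfsethdb",
//		Size:           16 * 1024 * 1024, // 16 MiB of groupcache space
//		ExpiryDuration: time.Hour,
//	})
//	defer store.Close()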

// InitCache initializes the groupcache for this database, using dbGet as the getter for cache misses
func (d *Database) InitCache(cacheConfig CacheConfig) {
	d.cache = groupcache.NewGroup(cacheConfig.Name, int64(cacheConfig.Size), groupcache.GetterFunc(
		func(_ context.Context, id string, dest groupcache.Sink) error {
			val, err := d.dbGet(id)
			if err != nil {
				return err
			}

			// Set the value in the groupcache, with expiry
			if err := dest.SetBytes(val, time.Now().Add(cacheConfig.ExpiryDuration)); err != nil {
				return err
			}

			return nil
		},
	))
}

// GetCacheStats returns the groupcache statistics for this database's cache
func (d *Database) GetCacheStats() groupcache.Stats {
	return d.cache.Stats
}

// Has satisfies the ethdb.KeyValueReader interface
// Has retrieves if a key is present in the key-value data store
func (d *Database) Has(key []byte) (bool, error) {
	mhKey, err := MultihashKeyFromKeccak256(key)
	if err != nil {
		return false, err
	}

	var exists bool
	return exists, d.db.Get(&exists, hasPgStr, mhKey)
}

// dbGet retrieves the value for the given multihash key directly from Postgres, bypassing the cache
func (d *Database) dbGet(key string) ([]byte, error) {
	var data []byte
	return data, d.db.Get(&data, getPgStr, key)
}

// Get satisfies the ethdb.KeyValueReader interface
// Get retrieves the given key if it's present in the key-value data store
func (d *Database) Get(key []byte) ([]byte, error) {
	mhKey, err := MultihashKeyFromKeccak256(key)
	if err != nil {
		return nil, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	var data []byte
	return data, d.cache.Get(ctx, mhKey, groupcache.AllocatingByteSliceSink(&data))
}

// Put satisfies the ethdb.KeyValueWriter interface
// Put inserts the given value into the key-value data store
// Key is expected to be the keccak256 hash of value
func (d *Database) Put(key []byte, value []byte) error {
	mhKey, err := MultihashKeyFromKeccak256(key)
	if err != nil {
		return err
	}

	_, err = d.db.Exec(putPgStr, mhKey, value)
	return err
}
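
// Illustrative example: since Put expects the key to be the keccak256 hash of the
// value, a caller would typically derive it with go-ethereum's crypto package:
//
//	key := crypto.Keccak256(value) // github.com/ethereum/go-ethereum/crypto
//	if err := store.Put(key, value); err != nil {
//		// handle error
//	}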

// Delete satisfies the ethdb.KeyValueWriter interface
// Delete removes the key from the key-value data store
func (d *Database) Delete(key []byte) error {
	mhKey, err := MultihashKeyFromKeccak256(key)
	if err != nil {
		return err
	}

	_, err = d.db.Exec(deletePgStr, mhKey)
	if err != nil {
		return err
	}

	// Remove from cache.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	err = d.cache.Remove(ctx, mhKey)
	return err
}

// DatabaseProperty enumerates the internal properties that can be queried via Stat
type DatabaseProperty int

const (
	Unknown DatabaseProperty = iota
	Size
	Idle
	InUse
	MaxIdleClosed
	MaxLifetimeClosed
	MaxOpenConnections
	OpenConnections
	WaitCount
	WaitDuration
)

// DatabasePropertyFromString resolves a property name to its DatabaseProperty enum value
func DatabasePropertyFromString(property string) (DatabaseProperty, error) {
	switch strings.ToLower(property) {
	case "size":
		return Size, nil
	case "idle":
		return Idle, nil
	case "inuse":
		return InUse, nil
	case "maxidleclosed":
		return MaxIdleClosed, nil
	case "maxlifetimeclosed":
		return MaxLifetimeClosed, nil
	case "maxopenconnections":
		return MaxOpenConnections, nil
	case "openconnections":
		return OpenConnections, nil
	case "waitcount":
		return WaitCount, nil
	case "waitduration":
		return WaitDuration, nil
	default:
		return Unknown, fmt.Errorf("unknown database property: %s", property)
	}
}

// Stat satisfies the ethdb.Stater interface
// Stat returns a particular internal stat of the database
func (d *Database) Stat(property string) (string, error) {
	prop, err := DatabasePropertyFromString(property)
	if err != nil {
		return "", err
	}
	// Numeric connection-pool stats are formatted with fmt.Sprint; a direct
	// string() conversion would interpret them as Unicode code points.
	switch prop {
	case Size:
		var byteSize string
		return byteSize, d.db.Get(&byteSize, dbSizePgStr)
	case Idle:
		return fmt.Sprint(d.db.Stats().Idle), nil
	case InUse:
		return fmt.Sprint(d.db.Stats().InUse), nil
	case MaxIdleClosed:
		return fmt.Sprint(d.db.Stats().MaxIdleClosed), nil
	case MaxLifetimeClosed:
		return fmt.Sprint(d.db.Stats().MaxLifetimeClosed), nil
	case MaxOpenConnections:
		return fmt.Sprint(d.db.Stats().MaxOpenConnections), nil
	case OpenConnections:
		return fmt.Sprint(d.db.Stats().OpenConnections), nil
	case WaitCount:
		return fmt.Sprint(d.db.Stats().WaitCount), nil
	case WaitDuration:
		return d.db.Stats().WaitDuration.String(), nil
	default:
		return "", fmt.Errorf("unhandled database property")
	}
}
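
// Illustrative example: the "size" property reports the Postgres database size
// (via pg_database_size) rather than a connection-pool statistic:
//
//	size, err := store.Stat("size")
//	if err != nil {
//		// handle error
//	}
//	fmt.Println("database size in bytes:", size)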

// Compact satisfies the ethdb.Compacter interface
// Compact flattens the underlying data store for the given key range
func (d *Database) Compact(start []byte, limit []byte) error {
	return errNotSupported
}

// NewBatch satisfies the ethdb.Batcher interface
// NewBatch creates a write-only database that buffers changes to its host db
// until a final write is called
func (d *Database) NewBatch() ethdb.Batch {
	return NewBatch(d.db, nil)
}
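
// A minimal batching sketch (illustrative; value1 and value2 are placeholder byte slices):
//
//	b := store.NewBatch()
//	_ = b.Put(crypto.Keccak256(value1), value1)
//	_ = b.Put(crypto.Keccak256(value2), value2)
//	if err := b.Write(); err != nil { // Write flushes the buffered changes to Postgres
//		// handle error
//	}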

// NewIterator satisfies the ethdb.Iteratee interface
// it creates a binary-alphabetical iterator over a subset
// of database content with a particular key prefix, starting at a particular
// initial key (or after, if it does not exist).
//
// Note: This method assumes that the prefix is NOT part of the start, so there's
// no need for the caller to prepend the prefix to the start
func (d *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
	return NewIterator(start, prefix, d.db)
}
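
// An illustrative sketch of iterating over the keys sharing a given prefix:
//
//	it := store.NewIterator(prefix, nil)
//	defer it.Release()
//	for it.Next() {
//		key, value := it.Key(), it.Value()
//		_, _ = key, value // process the key/value pair
//	}
//	if err := it.Error(); err != nil {
//		// handle iteration error
//	}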

// Close satisfies the io.Closer interface
// Close closes the db connection
func (d *Database) Close() error {
	return d.db.DB.Close()
}

// HasAncient satisfies the ethdb.AncientReader interface
// HasAncient returns an indicator whether the specified data exists in the ancient store
func (d *Database) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient satisfies the ethdb.AncientReader interface
// Ancient retrieves an ancient binary blob from the append-only immutable files
func (d *Database) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// Ancients satisfies the ethdb.AncientReader interface
// Ancients returns the ancient item numbers in the ancient store
func (d *Database) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize satisfies the ethdb.AncientReader interface
// AncientSize returns the ancient size of the specified category
func (d *Database) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// AppendAncient satisfies the ethdb.AncientWriter interface
// AppendAncient injects all binary blobs belonging to a block at the end of the append-only immutable table files
func (d *Database) AppendAncient(number uint64, hash, header, body, receipt, td []byte) error {
	return errNotSupported
}

// ReadAncients retrieves multiple items in sequence, starting from the index 'start'.
// It will return
//   - at most 'count' items,
//   - at least 1 item (even if exceeding the maxBytes), but will otherwise
//     return as many items as fit into maxBytes.
func (d *Database) ReadAncients(kind string, start, count, maxBytes uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// TruncateAncients satisfies the ethdb.AncientWriter interface
// TruncateAncients discards all but the first n ancient data from the ancient store
func (d *Database) TruncateAncients(n uint64) error {
	return errNotSupported
}

// Sync satisfies the ethdb.AncientWriter interface
// Sync flushes all in-memory ancient store data to disk
func (d *Database) Sync() error {
	return errNotSupported
}