Merge pull request #196 from vulcanize/pool_optimization

Pool optimization
This commit is contained in:
Ian Norden 2020-05-12 09:09:01 -05:00 committed by GitHub
commit bb14c01529
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 126 additions and 60 deletions

View File

@@ -7,7 +7,7 @@ CREATE TABLE btc.header_cids (
cid TEXT NOT NULL, cid TEXT NOT NULL,
timestamp NUMERIC NOT NULL, timestamp NUMERIC NOT NULL,
bits BIGINT NOT NULL, bits BIGINT NOT NULL,
node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE, node_id INTEGER NOT NULL REFERENCES nodes (id) ON DELETE CASCADE,
UNIQUE (block_number, block_hash) UNIQUE (block_number, block_hash)
); );

View File

@@ -5,16 +5,18 @@
user = "vdbm" # $DATABASE_USER user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD password = "" # $DATABASE_PASSWORD
[ipfs] [database.sync]
path = "~/.ipfs" # $IPFS_PATH maxIdle = 1
[database.backFill]
maxIdle = 5
[resync] [resync]
chain = "bitcoin" # $RESYNC_CHAIN chain = "bitcoin" # $RESYNC_CHAIN
type = "full" # $RESYNC_TYPE type = "full" # $RESYNC_TYPE
start = 0 # $RESYNC_START start = 0 # $RESYNC_START
stop = 0 # $RESYNC_STOP stop = 0 # $RESYNC_STOP
batchSize = 1 # $RESYNC_BATCH_SIZE batchSize = 5 # $RESYNC_BATCH_SIZE
batchNumber = 50 # $RESYNC_BATCH_NUMBER batchNumber = 5 # $RESYNC_BATCH_NUMBER
clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION resetValidation = true # $RESYNC_RESET_VALIDATION
@@ -28,8 +30,8 @@
workers = 1 # $SUPERNODE_WORKERS workers = 1 # $SUPERNODE_WORKERS
backFill = true # $SUPERNODE_BACKFILL backFill = true # $SUPERNODE_BACKFILL
frequency = 45 # $SUPERNODE_FREQUENCY frequency = 45 # $SUPERNODE_FREQUENCY
batchSize = 1 # $SUPERNODE_BATCH_SIZE batchSize = 5 # $SUPERNODE_BATCH_SIZE
batchNumber = 50 # $SUPERNODE_BATCH_NUMBER batchNumber = 5 # $SUPERNODE_BATCH_NUMBER
validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL
[bitcoin] [bitcoin]

View File

@@ -5,8 +5,10 @@
user = "vdbm" # $DATABASE_USER user = "vdbm" # $DATABASE_USER
password = "" # $DATABASE_PASSWORD password = "" # $DATABASE_PASSWORD
[ipfs] [database.sync]
path = "~/.ipfs" # $IPFS_PATH maxIdle = 1
[database.backFill]
maxIdle = 5
[resync] [resync]
chain = "ethereum" # $RESYNC_CHAIN chain = "ethereum" # $RESYNC_CHAIN
@@ -14,9 +16,9 @@
start = 0 # $RESYNC_START start = 0 # $RESYNC_START
stop = 0 # $RESYNC_STOP stop = 0 # $RESYNC_STOP
batchSize = 5 # $RESYNC_BATCH_SIZE batchSize = 5 # $RESYNC_BATCH_SIZE
batchNumber = 50 # $RESYNC_BATCH_NUMBER batchNumber = 5 # $RESYNC_BATCH_NUMBER
timeout = 300 # $HTTP_TIMEOUT timeout = 300 # $HTTP_TIMEOUT
clearOldCache = false # $RESYNC_CLEAR_OLD_CACHE clearOldCache = true # $RESYNC_CLEAR_OLD_CACHE
resetValidation = true # $RESYNC_RESET_VALIDATION resetValidation = true # $RESYNC_RESET_VALIDATION
[superNode] [superNode]
@@ -30,7 +32,7 @@
backFill = true # $SUPERNODE_BACKFILL backFill = true # $SUPERNODE_BACKFILL
frequency = 15 # $SUPERNODE_FREQUENCY frequency = 15 # $SUPERNODE_FREQUENCY
batchSize = 5 # $SUPERNODE_BATCH_SIZE batchSize = 5 # $SUPERNODE_BATCH_SIZE
batchNumber = 50 # $SUPERNODE_BATCH_NUMBER batchNumber = 5 # $SUPERNODE_BATCH_NUMBER
timeout = 300 # $HTTP_TIMEOUT timeout = 300 # $HTTP_TIMEOUT
validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL validationLevel = 1 # $SUPERNODE_VALIDATION_LEVEL

View File

@@ -1,22 +0,0 @@
// VulcanizeDB
// Copyright © 2019 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package config
type Config struct {
Database Database
Client Client
}

View File

@@ -24,19 +24,25 @@ import (
// Env variables // Env variables
const ( const (
DATABASE_NAME = "DATABASE_NAME" DATABASE_NAME = "DATABASE_NAME"
DATABASE_HOSTNAME = "DATABASE_HOSTNAME" DATABASE_HOSTNAME = "DATABASE_HOSTNAME"
DATABASE_PORT = "DATABASE_PORT" DATABASE_PORT = "DATABASE_PORT"
DATABASE_USER = "DATABASE_USER" DATABASE_USER = "DATABASE_USER"
DATABASE_PASSWORD = "DATABASE_PASSWORD" DATABASE_PASSWORD = "DATABASE_PASSWORD"
DATABASE_MAX_IDLE_CONNECTIONS = "DATABASE_MAX_IDLE_CONNECTIONS"
DATABASE_MAX_OPEN_CONNECTIONS = "DATABASE_MAX_OPEN_CONNECTIONS"
DATABASE_MAX_CONN_LIFETIME = "DATABASE_MAX_CONN_LIFETIME"
) )
type Database struct { type Database struct {
Hostname string Hostname string
Name string Name string
User string User string
Password string Password string
Port int Port int
MaxIdle int
MaxOpen int
MaxLifetime int
} }
func DbConnectionString(dbConfig Database) string { func DbConnectionString(dbConfig Database) string {
@@ -57,9 +63,16 @@ func (d *Database) Init() {
viper.BindEnv("database.port", DATABASE_PORT) viper.BindEnv("database.port", DATABASE_PORT)
viper.BindEnv("database.user", DATABASE_USER) viper.BindEnv("database.user", DATABASE_USER)
viper.BindEnv("database.password", DATABASE_PASSWORD) viper.BindEnv("database.password", DATABASE_PASSWORD)
viper.BindEnv("database.maxIdle", DATABASE_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.maxOpen", DATABASE_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.maxLifetime", DATABASE_MAX_CONN_LIFETIME)
d.Name = viper.GetString("database.name") d.Name = viper.GetString("database.name")
d.Hostname = viper.GetString("database.hostname") d.Hostname = viper.GetString("database.hostname")
d.Port = viper.GetInt("database.port") d.Port = viper.GetInt("database.port")
d.User = viper.GetString("database.user") d.User = viper.GetString("database.user")
d.Password = viper.GetString("database.password") d.Password = viper.GetString("database.password")
d.MaxIdle = viper.GetInt("database.maxIdle")
d.MaxOpen = viper.GetInt("database.maxOpen")
d.MaxLifetime = viper.GetInt("database.maxLifetime")
} }

View File

@@ -17,6 +17,8 @@
package postgres package postgres
import ( import (
"time"
"github.com/jmoiron/sqlx" "github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //postgres driver _ "github.com/lib/pq" //postgres driver
"github.com/vulcanize/vulcanizedb/pkg/config" "github.com/vulcanize/vulcanizedb/pkg/config"
@@ -35,6 +37,16 @@ func NewDB(databaseConfig config.Database, node core.Node) (*DB, error) {
if connectErr != nil { if connectErr != nil {
return &DB{}, ErrDBConnectionFailed(connectErr) return &DB{}, ErrDBConnectionFailed(connectErr)
} }
if databaseConfig.MaxOpen > 0 {
db.SetMaxOpenConns(databaseConfig.MaxOpen)
}
if databaseConfig.MaxIdle > 0 {
db.SetMaxIdleConns(databaseConfig.MaxIdle)
}
if databaseConfig.MaxLifetime > 0 {
lifetime := time.Duration(databaseConfig.MaxLifetime) * time.Second
db.SetConnMaxLifetime(lifetime)
}
pg := DB{DB: db, Node: node} pg := DB{DB: db, Node: node}
nodeErr := pg.CreateNode(&node) nodeErr := pg.CreateNode(&node)
if nodeErr != nil { if nodeErr != nil {

View File

@@ -69,11 +69,11 @@ type BackFillService struct {
// NewBackFillService returns a new BackFillInterface // NewBackFillService returns a new BackFillInterface
func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) { func NewBackFillService(settings *Config, screenAndServeChan chan shared.ConvertedData) (BackFillInterface, error) {
publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) publisher, err := NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.BackFillDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
indexer, err := NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) indexer, err := NewCIDIndexer(settings.Chain, settings.BackFillDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -81,7 +81,7 @@ func NewBackFillService(settings *Config, screenAndServeChan chan shared.Convert
if err != nil { if err != nil {
return nil, err return nil, err
} }
retriever, err := NewCIDRetriever(settings.Chain, settings.DB) retriever, err := NewCIDRetriever(settings.Chain, settings.BackFillDBConn)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -45,6 +45,18 @@ const (
SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE" SUPERNODE_BATCH_SIZE = "SUPERNODE_BATCH_SIZE"
SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER" SUPERNODE_BATCH_NUMBER = "SUPERNODE_BATCH_NUMBER"
SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL" SUPERNODE_VALIDATION_LEVEL = "SUPERNODE_VALIDATION_LEVEL"
SYNC_MAX_IDLE_CONNECTIONS = "SYNC_MAX_IDLE_CONNECTIONS"
SYNC_MAX_OPEN_CONNECTIONS = "SYNC_MAX_OPEN_CONNECTIONS"
SYNC_MAX_CONN_LIFETIME = "SYNC_MAX_CONN_LIFETIME"
BACKFILL_MAX_IDLE_CONNECTIONS = "BACKFILL_MAX_IDLE_CONNECTIONS"
BACKFILL_MAX_OPEN_CONNECTIONS = "BACKFILL_MAX_OPEN_CONNECTIONS"
BACKFILL_MAX_CONN_LIFETIME = "BACKFILL_MAX_CONN_LIFETIME"
SERVER_MAX_IDLE_CONNECTIONS = "SERVER_MAX_IDLE_CONNECTIONS"
SERVER_MAX_OPEN_CONNECTIONS = "SERVER_MAX_OPEN_CONNECTIONS"
SERVER_MAX_CONN_LIFETIME = "SERVER_MAX_CONN_LIFETIME"
) )
// Config struct // Config struct
@@ -53,21 +65,23 @@
Chain shared.ChainType Chain shared.ChainType
IPFSPath string IPFSPath string
IPFSMode shared.IPFSMode IPFSMode shared.IPFSMode
DB *postgres.DB
DBConfig config.Database DBConfig config.Database
Quit chan bool Quit chan bool
// Server fields // Server fields
Serve bool Serve bool
ServeDBConn *postgres.DB
WSEndpoint string WSEndpoint string
HTTPEndpoint string HTTPEndpoint string
IPCEndpoint string IPCEndpoint string
// Sync params // Sync params
Sync bool Sync bool
Workers int SyncDBConn *postgres.DB
WSClient interface{} Workers int
NodeInfo core.Node WSClient interface{}
NodeInfo core.Node
// Backfiller params // Backfiller params
BackFill bool BackFill bool
BackFillDBConn *postgres.DB
HTTPClient interface{} HTTPClient interface{}
Frequency time.Duration Frequency time.Duration
BatchSize uint64 BatchSize uint64
@@ -110,6 +124,8 @@ func NewSuperNodeConfig() (*Config, error) {
} }
} }
c.DBConfig.Init()
c.Sync = viper.GetBool("superNode.sync") c.Sync = viper.GetBool("superNode.sync")
if c.Sync { if c.Sync {
workers := viper.GetInt("superNode.workers") workers := viper.GetInt("superNode.workers")
@@ -128,6 +144,9 @@ func NewSuperNodeConfig() (*Config, error) {
btcWS := viper.GetString("bitcoin.wsPath") btcWS := viper.GetString("bitcoin.wsPath")
c.NodeInfo, c.WSClient = shared.GetBtcNodeAndClient(btcWS) c.NodeInfo, c.WSClient = shared.GetBtcNodeAndClient(btcWS)
} }
syncDBConn := overrideDBConnConfig(c.DBConfig, Sync)
syncDB := utils.LoadPostgres(syncDBConn, c.NodeInfo)
c.SyncDBConn = &syncDB
} }
c.Serve = viper.GetBool("superNode.server") c.Serve = viper.GetBool("superNode.server")
@@ -151,6 +170,9 @@ func NewSuperNodeConfig() (*Config, error) {
httpPath = "127.0.0.1:8081" httpPath = "127.0.0.1:8081"
} }
c.HTTPEndpoint = httpPath c.HTTPEndpoint = httpPath
serveDBConn := overrideDBConnConfig(c.DBConfig, Serve)
serveDB := utils.LoadPostgres(serveDBConn, c.NodeInfo)
c.ServeDBConn = &serveDB
} }
c.BackFill = viper.GetBool("superNode.backFill") c.BackFill = viper.GetBool("superNode.backFill")
@@ -160,9 +182,6 @@ func NewSuperNodeConfig() (*Config, error) {
} }
} }
c.DBConfig.Init()
db := utils.LoadPostgres(c.DBConfig, c.NodeInfo)
c.DB = &db
c.Quit = make(chan bool) c.Quit = make(chan bool)
return c, nil return c, nil
@@ -209,5 +228,45 @@ func (c *Config) BackFillFields() error {
c.BatchSize = uint64(viper.GetInt64("superNode.batchSize")) c.BatchSize = uint64(viper.GetInt64("superNode.batchSize"))
c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber")) c.BatchNumber = uint64(viper.GetInt64("superNode.batchNumber"))
c.ValidationLevel = viper.GetInt("superNode.validationLevel") c.ValidationLevel = viper.GetInt("superNode.validationLevel")
backFillDBConn := overrideDBConnConfig(c.DBConfig, BackFill)
backFillDB := utils.LoadPostgres(backFillDBConn, c.NodeInfo)
c.BackFillDBConn = &backFillDB
return nil return nil
} }
type mode string
var (
Sync mode = "sync"
BackFill mode = "backFill"
Serve mode = "serve"
)
func overrideDBConnConfig(con config.Database, m mode) config.Database {
switch m {
case Sync:
viper.BindEnv("database.sync.maxIdle", SYNC_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.sync.maxOpen", SYNC_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.sync.maxLifetime", SYNC_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.sync.maxIdle")
con.MaxOpen = viper.GetInt("database.sync.maxOpen")
con.MaxLifetime = viper.GetInt("database.sync.maxLifetime")
case BackFill:
viper.BindEnv("database.backFill.maxIdle", BACKFILL_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.backFill.maxOpen", BACKFILL_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.backFill.maxLifetime", BACKFILL_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.backFill.maxIdle")
con.MaxOpen = viper.GetInt("database.backFill.maxOpen")
con.MaxLifetime = viper.GetInt("database.backFill.maxLifetime")
case Serve:
viper.BindEnv("database.server.maxIdle", SERVER_MAX_IDLE_CONNECTIONS)
viper.BindEnv("database.server.maxOpen", SERVER_MAX_OPEN_CONNECTIONS)
viper.BindEnv("database.server.maxLifetime", SERVER_MAX_CONN_LIFETIME)
con.MaxIdle = viper.GetInt("database.server.maxIdle")
con.MaxOpen = viper.GetInt("database.server.maxOpen")
con.MaxLifetime = viper.GetInt("database.server.maxLifetime")
default:
}
return con
}

View File

@@ -109,11 +109,11 @@ func NewSuperNode(settings *Config) (SuperNode, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) sn.Publisher, err = NewIPLDPublisher(settings.Chain, settings.IPFSPath, settings.SyncDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.DB, settings.IPFSMode) sn.Indexer, err = NewCIDIndexer(settings.Chain, settings.SyncDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -124,14 +124,15 @@ func NewSuperNode(settings *Config) (SuperNode, error) {
} }
// If we are serving, initialize the needed interfaces // If we are serving, initialize the needed interfaces
if settings.Serve { if settings.Serve {
sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.DB) sn.Retriever, err = NewCIDRetriever(settings.Chain, settings.ServeDBConn)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.DB, settings.IPFSMode) sn.IPLDFetcher, err = NewIPLDFetcher(settings.Chain, settings.IPFSPath, settings.ServeDBConn, settings.IPFSMode)
if err != nil { if err != nil {
return nil, err return nil, err
} }
sn.db = settings.ServeDBConn
} }
sn.QuitChan = settings.Quit sn.QuitChan = settings.Quit
sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription) sn.Subscriptions = make(map[common.Hash]map[rpc.ID]Subscription)
@@ -140,7 +141,6 @@ func NewSuperNode(settings *Config) (SuperNode, error) {
sn.NodeInfo = &settings.NodeInfo sn.NodeInfo = &settings.NodeInfo
sn.ipfsPath = settings.IPFSPath sn.ipfsPath = settings.IPFSPath
sn.chain = settings.Chain sn.chain = settings.Chain
sn.db = settings.DB
return sn, nil return sn, nil
} }