Refactor tests for watched addresses methods

Prathamesh Musale committed 2022-08-16 14:22:32 +05:30
parent 3e896c6c91
commit d6563c079c
18 changed files with 1091 additions and 1457 deletions
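
In outline, every driver-specific watch-address test in this commit now follows the same shape: build arguments with the shared mocks.Get*Args helpers, call the indexer method under test, optionally reset the database and dump the file output back into it, then run the shared assertions from the test package. The sketch below illustrates that pattern as a hypothetical wrapper; runWatchAddressTests itself is not part of the commit, only the mocks.* and test.* helpers it calls are.

// Hypothetical wrapper illustrating the refactored pattern; not part of this commit.
package example_test

import (
	"math/big"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
	"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
	"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
	"github.com/ethereum/go-ethereum/statediff/indexer/test"
)

// resetAndDump is the driver-specific dump step (e.g. resetAndDumpWatchedAddressesCSVFileData)
// and can be a no-op for the direct SQL indexers.
func runWatchAddressTests(t *testing.T, ind interfaces.StateDiffIndexer, db sql.Database, resetAndDump func(*testing.T)) {
	t.Run("Load watched addresses (empty table)", func(t *testing.T) {
		test.TestLoadEmptyWatchedAddresses(t, ind)
	})
	t.Run("Insert watched addresses", func(t *testing.T) {
		args := mocks.GetInsertWatchedAddressesArgs()
		require.NoError(t, ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1))))
		resetAndDump(t)
		test.TestInsertWatchedAddresses(t, db)
	})
	// The remaining subtests (insert already-watched, remove, set, load, clear)
	// repeat the same call-then-assert pattern with the matching mocks/test helpers.
}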


@ -86,11 +86,13 @@ func dumpCSVFileData(t *testing.T) {
}
}
func dumpWatchedAddressesCSVFileData(t *testing.T) {
func resetAndDumpWatchedAddressesCSVFileData(t *testing.T) {
test_helpers.TearDownDB(t, db)
outputFilePath := filepath.Join(dbDirectory, file.CSVTestConfig.WatchedAddressesFilePath)
stmt := fmt.Sprintf(pgCopyStatement, types.TableWatchedAddresses.Name, outputFilePath)
_, err = sqlxdb.Exec(stmt)
_, err = db.Exec(context.Background(), stmt)
require.NoError(t, err)
}
@ -98,8 +100,7 @@ func tearDownCSV(t *testing.T) {
test_helpers.TearDownDB(t, db)
require.NoError(t, db.Close())
err := os.RemoveAll(file.CSVTestConfig.OutputDir)
require.NoError(t, err)
require.NoError(t, os.RemoveAll(file.CSVTestConfig.OutputDir))
if err := os.Remove(file.CSVTestConfig.WatchedAddressesFilePath); !errors.Is(err, os.ErrNotExist) {
require.NoError(t, err)


@ -19,6 +19,7 @@ package file_test
import (
"context"
"errors"
"math/big"
"os"
"testing"
@ -162,71 +163,93 @@ func TestCSVFileIndexerNonCanonical(t *testing.T) {
})
}
// func TestCSVFileWatchAddressMethods(t *testing.T) {
// setupCSVIndexer(t)
// defer tearDownCSV(t)
func TestCSVFileWatchAddressMethods(t *testing.T) {
setupCSVIndexer(t)
defer tearDownCSV(t)
// t.Run("Load watched addresses (empty table)", func(t *testing.T) {
// testLoadEmptyWatchedAddresses(t)
// })
t.Run("Load watched addresses (empty table)", func(t *testing.T) {
test.TestLoadEmptyWatchedAddresses(t, ind)
})
// t.Run("Insert watched addresses", func(t *testing.T) {
// testInsertWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
t.Run("Insert watched addresses", func(t *testing.T) {
args := mocks.GetInsertWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1)))
require.NoError(t, err)
// t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
// testInsertAlreadyWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
resetAndDumpWatchedAddressesCSVFileData(t)
// t.Run("Remove watched addresses", func(t *testing.T) {
// testRemoveWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
test.TestInsertWatchedAddresses(t, db)
})
// t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
// testRemoveNonWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetInsertAlreadyWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// t.Run("Set watched addresses", func(t *testing.T) {
// testSetWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
resetAndDumpWatchedAddressesCSVFileData(t)
// t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
// testSetAlreadyWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
test.TestInsertAlreadyWatchedAddresses(t, db)
})
// t.Run("Load watched addresses", func(t *testing.T) {
// testLoadWatchedAddresses(t)
// })
t.Run("Remove watched addresses", func(t *testing.T) {
args := mocks.GetRemoveWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// t.Run("Clear watched addresses", func(t *testing.T) {
// testClearWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
resetAndDumpWatchedAddressesCSVFileData(t)
// t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
// testClearEmptyWatchedAddresses(t, func(t *testing.T) {
// test_helpers.TearDownDB(t, db)
// dumpWatchedAddressesCSVFileData(t)
// })
// })
// }
test.TestRemoveWatchedAddresses(t, db)
})
t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
args := mocks.GetRemoveNonWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
resetAndDumpWatchedAddressesCSVFileData(t)
test.TestRemoveNonWatchedAddresses(t, db)
})
t.Run("Set watched addresses", func(t *testing.T) {
args := mocks.GetSetWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
resetAndDumpWatchedAddressesCSVFileData(t)
test.TestSetWatchedAddresses(t, db)
})
t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetSetAlreadyWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3)))
require.NoError(t, err)
resetAndDumpWatchedAddressesCSVFileData(t)
test.TestSetAlreadyWatchedAddresses(t, db)
})
t.Run("Load watched addresses", func(t *testing.T) {
test.TestLoadWatchedAddresses(t, ind)
})
t.Run("Clear watched addresses", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
resetAndDumpWatchedAddressesCSVFileData(t)
test.TestClearWatchedAddresses(t, db)
})
t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
resetAndDumpWatchedAddressesCSVFileData(t)
test.TestClearEmptyWatchedAddresses(t, db)
})
}


@ -1,385 +0,0 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package file_test
import (
"testing"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
var (
db sql.Database
sqlxdb *sqlx.DB
err error
ind interfaces.StateDiffIndexer
)
func expectTrue(t *testing.T, value bool) {
if !value {
t.Fatalf("Assertion failed")
}
}
func resetDB(t *testing.T) {
test_helpers.TearDownDB(t, db)
connStr := postgres.DefaultConfig.DbConnectionString()
sqlxdb, err = sqlx.Connect("postgres", connStr)
if err != nil {
t.Fatalf("failed to connect to db with connection string: %s err: %v", connStr, err)
}
}
func testLoadEmptyWatchedAddresses(t *testing.T) {
expectedData := []common.Address{}
rows, err := ind.LoadWatchedAddresses()
require.NoError(t, err)
expectTrue(t, len(rows) == len(expectedData))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
type res struct {
Address string `db:"address"`
CreatedAt uint64 `db:"created_at"`
WatchedAt uint64 `db:"watched_at"`
LastFilledAt uint64 `db:"last_filled_at"`
}
// func testInsertWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1)))
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testInsertAlreadyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testRemoveWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testRemoveNonWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{}
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testSetWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testSetAlreadyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3)))
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testLoadWatchedAddresses(t *testing.T) {
// expectedData := []common.Address{
// common.HexToAddress(contract4Address),
// common.HexToAddress(contract2Address),
// common.HexToAddress(contract3Address),
// }
// rows, err := ind.LoadWatchedAddresses()
// require.NoError(t, err)
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testClearWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }
// func testClearEmptyWatchedAddresses(t *testing.T, resetAndDumpData func(*testing.T)) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// resetAndDumpData(t)
// rows := []res{}
// err = sqlxdb.Select(&rows, watchedAddressesPgGet)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// }


@ -24,7 +24,6 @@ import (
"os"
"testing"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/core/types"
@ -40,7 +39,6 @@ import (
var (
err error
db sql.Database
sqlxdb *sqlx.DB
ind interfaces.StateDiffIndexer
chainConf = params.MainnetChainConfig
)


@ -25,11 +25,19 @@ import (
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql/postgres"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/test"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
var (
db sql.Database
err error
ind interfaces.StateDiffIndexer
)
func setupLegacySQLIndexer(t *testing.T) {
if _, err := os.Stat(file.SQLTestConfig.FilePath); !errors.Is(err, os.ErrNotExist) {
err := os.Remove(file.SQLTestConfig.FilePath)
@ -62,12 +70,12 @@ func dumpFileData(t *testing.T) {
}
func resetAndDumpWatchedAddressesFileData(t *testing.T) {
resetDB(t)
test_helpers.TearDownDB(t, db)
sqlFileBytes, err := os.ReadFile(file.SQLTestConfig.WatchedAddressesFilePath)
require.NoError(t, err)
_, err = sqlxdb.Exec(string(sqlFileBytes))
_, err = db.Exec(context.Background(), string(sqlFileBytes))
require.NoError(t, err)
}
@ -75,8 +83,7 @@ func tearDown(t *testing.T) {
test_helpers.TearDownDB(t, db)
require.NoError(t, db.Close())
err := os.Remove(file.SQLTestConfig.FilePath)
require.NoError(t, err)
require.NoError(t, os.Remove(file.SQLTestConfig.FilePath))
if err := os.Remove(file.SQLTestConfig.WatchedAddressesFilePath); !errors.Is(err, os.ErrNotExist) {
require.NoError(t, err)


@ -19,6 +19,7 @@ package file_test
import (
"context"
"errors"
"math/big"
"os"
"testing"
@ -160,47 +161,93 @@ func TestSQLFileIndexerNonCanonical(t *testing.T) {
})
}
// func TestSQLFileWatchAddressMethods(t *testing.T) {
// setupIndexer(t)
// defer tearDown(t)
func TestSQLFileWatchAddressMethods(t *testing.T) {
setupIndexer(t)
defer tearDown(t)
// t.Run("Load watched addresses (empty table)", func(t *testing.T) {
// testLoadEmptyWatchedAddresses(t)
// })
t.Run("Load watched addresses (empty table)", func(t *testing.T) {
test.TestLoadEmptyWatchedAddresses(t, ind)
})
// t.Run("Insert watched addresses", func(t *testing.T) {
// testInsertWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
t.Run("Insert watched addresses", func(t *testing.T) {
args := mocks.GetInsertWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1)))
require.NoError(t, err)
// t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
// testInsertAlreadyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
resetAndDumpWatchedAddressesFileData(t)
// t.Run("Remove watched addresses", func(t *testing.T) {
// testRemoveWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
test.TestInsertWatchedAddresses(t, db)
})
// t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
// testRemoveNonWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetInsertAlreadyWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// t.Run("Set watched addresses", func(t *testing.T) {
// testSetWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
resetAndDumpWatchedAddressesFileData(t)
// t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
// testSetAlreadyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
test.TestInsertAlreadyWatchedAddresses(t, db)
})
// t.Run("Load watched addresses", func(t *testing.T) {
// testLoadWatchedAddresses(t)
// })
t.Run("Remove watched addresses", func(t *testing.T) {
args := mocks.GetRemoveWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// t.Run("Clear watched addresses", func(t *testing.T) {
// testClearWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
resetAndDumpWatchedAddressesFileData(t)
// t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
// testClearEmptyWatchedAddresses(t, resetAndDumpWatchedAddressesFileData)
// })
// }
test.TestRemoveWatchedAddresses(t, db)
})
t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
args := mocks.GetRemoveNonWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
resetAndDumpWatchedAddressesFileData(t)
test.TestRemoveNonWatchedAddresses(t, db)
})
t.Run("Set watched addresses", func(t *testing.T) {
args := mocks.GetSetWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
resetAndDumpWatchedAddressesFileData(t)
test.TestSetWatchedAddresses(t, db)
})
t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetSetAlreadyWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3)))
require.NoError(t, err)
resetAndDumpWatchedAddressesFileData(t)
test.TestSetAlreadyWatchedAddresses(t, db)
})
t.Run("Load watched addresses", func(t *testing.T) {
test.TestLoadWatchedAddresses(t, ind)
})
t.Run("Clear watched addresses", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
resetAndDumpWatchedAddressesFileData(t)
test.TestClearWatchedAddresses(t, db)
})
t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
resetAndDumpWatchedAddressesFileData(t)
test.TestClearEmptyWatchedAddresses(t, db)
})
}


@ -16,12 +16,6 @@ var (
ind interfaces.StateDiffIndexer
)
func expectTrue(t *testing.T, value bool) {
if !value {
t.Fatalf("Assertion failed")
}
}
func checkTxClosure(t *testing.T, idle, inUse, open int64) {
require.Equal(t, idle, db.Stats().Idle())
require.Equal(t, inUse, db.Stats().InUse())
@ -30,6 +24,5 @@ func checkTxClosure(t *testing.T, idle, inUse, open int64) {
func tearDown(t *testing.T) {
test_helpers.TearDownDB(t, db)
err := ind.Close()
require.NoError(t, err)
require.NoError(t, ind.Close())
}


@ -68,6 +68,7 @@ func TestMainnetIndexer(t *testing.T) {
func testPushBlockAndState(t *testing.T, block *types.Block, receipts types.Receipts) {
t.Run("Test PushBlock and PushStateNode", func(t *testing.T) {
setupMainnetIndexer(t)
defer checkTxClosure(t, 0, 0, 0)
defer tearDown(t)
test.TestBlock(t, ind, block, receipts)
@ -82,11 +83,13 @@ func setupMainnetIndexer(t *testing.T) {
ind, err = sql.NewStateDiffIndexer(context.Background(), chainConf, db)
}
func tearDown(t *testing.T) {
require.Equal(t, int64(0), db.Stats().Idle())
require.Equal(t, int64(0), db.Stats().InUse())
require.Equal(t, int64(0), db.Stats().Open())
func checkTxClosure(t *testing.T, idle, inUse, open int64) {
require.Equal(t, idle, db.Stats().Idle())
require.Equal(t, inUse, db.Stats().InUse())
require.Equal(t, open, db.Stats().Open())
}
func tearDown(t *testing.T) {
test_helpers.TearDownDB(t, db)
require.NoError(t, ind.Close())
}


@ -43,7 +43,7 @@ func setupLegacyPGX(t *testing.T) {
func TestLegacyPGXIndexer(t *testing.T) {
t.Run("Publish and index header IPLDs", func(t *testing.T) {
setupLegacySQLX(t)
setupLegacyPGX(t)
defer tearDown(t)
defer checkTxClosure(t, 0, 0, 0)


@ -18,6 +18,7 @@ package sql_test
import (
"context"
"math/big"
"testing"
"github.com/stretchr/testify/require"
@ -149,333 +150,78 @@ func TestPGXIndexerNonCanonical(t *testing.T) {
})
}
// func TestPGXWatchAddressMethods(t *testing.T) {
// setupPGXIndexer(t)
// defer tearDown(t)
// defer checkTxClosure(t, 1, 0, 1)
func TestPGXWatchAddressMethods(t *testing.T) {
setupPGXIndexer(t)
defer tearDown(t)
defer checkTxClosure(t, 1, 0, 1)
// type res struct {
// Address string `db:"address"`
// CreatedAt uint64 `db:"created_at"`
// WatchedAt uint64 `db:"watched_at"`
// LastFilledAt uint64 `db:"last_filled_at"`
// }
// pgStr := "SELECT * FROM eth_meta.watched_addresses"
t.Run("Load watched addresses (empty table)", func(t *testing.T) {
test.TestLoadEmptyWatchedAddresses(t, ind)
})
// t.Run("Load watched addresses (empty table)", func(t *testing.T) {
// expectedData := []common.Address{}
t.Run("Insert watched addresses", func(t *testing.T) {
args := mocks.GetInsertWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1)))
require.NoError(t, err)
// rows, err := ind.LoadWatchedAddresses()
// require.NoError(t, err)
test.TestInsertWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetInsertAlreadyWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// t.Run("Insert watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestInsertAlreadyWatchedAddresses(t, db)
})
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1)))
// require.NoError(t, err)
t.Run("Remove watched addresses", func(t *testing.T) {
args := mocks.GetRemoveWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
test.TestRemoveWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
args := mocks.GetRemoveNonWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestRemoveNonWatchedAddresses(t, db)
})
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
t.Run("Set watched addresses", func(t *testing.T) {
args := mocks.GetSetWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
test.TestSetWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetSetAlreadyWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3)))
require.NoError(t, err)
// t.Run("Remove watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestSetAlreadyWatchedAddresses(t, db)
})
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
t.Run("Load watched addresses", func(t *testing.T) {
test.TestLoadWatchedAddresses(t, ind)
})
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
t.Run("Clear watched addresses", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
test.TestClearWatchedAddresses(t, db)
})
// t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{}
t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Set watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3)))
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Load watched addresses", func(t *testing.T) {
// expectedData := []common.Address{
// common.HexToAddress(contract4Address),
// common.HexToAddress(contract2Address),
// common.HexToAddress(contract3Address),
// }
// rows, err := ind.LoadWatchedAddresses()
// require.NoError(t, err)
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Clear watched addresses", func(t *testing.T) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// }
test.TestClearEmptyWatchedAddresses(t, db)
})
}


@ -18,6 +18,7 @@ package sql_test
import (
"context"
"math/big"
"testing"
"github.com/stretchr/testify/require"
@ -149,333 +150,78 @@ func TestSQLXIndexerNonCanonical(t *testing.T) {
})
}
// func TestSQLXWatchAddressMethods(t *testing.T) {
// setupSQLXIndexer(t)
// defer tearDown(t)
// defer checkTxClosure(t, 0, 0, 0)
func TestSQLXWatchAddressMethods(t *testing.T) {
setupSQLXIndexer(t)
defer tearDown(t)
defer checkTxClosure(t, 0, 0, 0)
// type res struct {
// Address string `db:"address"`
// CreatedAt uint64 `db:"created_at"`
// WatchedAt uint64 `db:"watched_at"`
// LastFilledAt uint64 `db:"last_filled_at"`
// }
// pgStr := "SELECT * FROM eth_meta.watched_addresses"
t.Run("Load watched addresses (empty table)", func(t *testing.T) {
test.TestLoadEmptyWatchedAddresses(t, ind)
})
// t.Run("Load watched addresses (empty table)", func(t *testing.T) {
// expectedData := []common.Address{}
t.Run("Insert watched addresses", func(t *testing.T) {
args := mocks.GetInsertWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt1)))
require.NoError(t, err)
// rows, err := ind.LoadWatchedAddresses()
// require.NoError(t, err)
test.TestInsertWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetInsertAlreadyWatchedAddressesArgs()
err = ind.InsertWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// t.Run("Insert watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestInsertAlreadyWatchedAddresses(t, db)
})
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt1)))
// require.NoError(t, err)
t.Run("Remove watched addresses", func(t *testing.T) {
args := mocks.GetRemoveWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
test.TestRemoveWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
args := mocks.GetRemoveNonWatchedAddressesArgs()
err = ind.RemoveWatchedAddresses(args)
require.NoError(t, err)
// t.Run("Insert watched addresses (some already watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestRemoveNonWatchedAddresses(t, db)
})
// err = ind.InsertWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
t.Run("Set watched addresses", func(t *testing.T) {
args := mocks.GetSetWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt2)))
require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
test.TestSetWatchedAddresses(t, db)
})
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
args := mocks.GetSetAlreadyWatchedAddressesArgs()
err = ind.SetWatchedAddresses(args, big.NewInt(int64(mocks.WatchedAt3)))
require.NoError(t, err)
// t.Run("Remove watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt1,
// LastFilledAt: lastFilledAt,
// },
// }
test.TestSetAlreadyWatchedAddresses(t, db)
})
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
t.Run("Load watched addresses", func(t *testing.T) {
test.TestLoadWatchedAddresses(t, ind)
})
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
t.Run("Clear watched addresses", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
test.TestClearWatchedAddresses(t, db)
})
// t.Run("Remove watched addresses (some non-watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// }
// expectedData := []res{}
t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
err = ind.ClearWatchedAddresses()
require.NoError(t, err)
// err = ind.RemoveWatchedAddresses(args)
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Set watched addresses", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract1Address,
// CreatedAt: contract1CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt2,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt2)))
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Set watched addresses (some already watched)", func(t *testing.T) {
// args := []sdtypes.WatchAddressArg{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// },
// }
// expectedData := []res{
// {
// Address: contract4Address,
// CreatedAt: contract4CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract2Address,
// CreatedAt: contract2CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// {
// Address: contract3Address,
// CreatedAt: contract3CreatedAt,
// WatchedAt: watchedAt3,
// LastFilledAt: lastFilledAt,
// },
// }
// err = ind.SetWatchedAddresses(args, big.NewInt(int64(watchedAt3)))
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Load watched addresses", func(t *testing.T) {
// expectedData := []common.Address{
// common.HexToAddress(contract4Address),
// common.HexToAddress(contract2Address),
// common.HexToAddress(contract3Address),
// }
// rows, err := ind.LoadWatchedAddresses()
// require.NoError(t, err)
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Clear watched addresses", func(t *testing.T) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// t.Run("Clear watched addresses (empty table)", func(t *testing.T) {
// expectedData := []res{}
// err = ind.ClearWatchedAddresses()
// require.NoError(t, err)
// rows := []res{}
// err = db.Select(context.Background(), &rows, pgStr)
// if err != nil {
// t.Fatal(err)
// }
// expectTrue(t, len(rows) == len(expectedData))
// for idx, row := range rows {
// require.Equal(t, expectedData[idx], row)
// }
// })
// }
test.TestClearEmptyWatchedAddresses(t, db)
})
}


@ -255,6 +255,21 @@ var (
},
},
}
// Mock data for testing watched addresses methods
Contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F"
Contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B"
Contract3Address = "0xcfeB164C328CA13EFd3C77E1980d94975aDfedfc"
Contract4Address = "0x0Edf0c4f393a628DE4828B228C48175b3EA297fc"
Contract1CreatedAt = uint64(1)
Contract2CreatedAt = uint64(2)
Contract3CreatedAt = uint64(3)
Contract4CreatedAt = uint64(4)
LastFilledAt = uint64(0)
WatchedAt1 = uint64(10)
WatchedAt2 = uint64(15)
WatchedAt3 = uint64(20)
)
type LegacyData struct {
@ -505,3 +520,90 @@ func createNonCanonicalBlockReceipts(config *params.ChainConfig, blockNumber *bi
return types.Receipts{mockReceipt0, mockReceipt1}
}
// Helper methods for testing watched addresses methods
func GetInsertWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract1Address,
CreatedAt: Contract1CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
}
}
func GetInsertAlreadyWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract3Address,
CreatedAt: Contract3CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
}
}
func GetRemoveWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract3Address,
CreatedAt: Contract3CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
}
}
func GetRemoveNonWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract1Address,
CreatedAt: Contract1CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
}
}
func GetSetWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract1Address,
CreatedAt: Contract1CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
{
Address: Contract3Address,
CreatedAt: Contract3CreatedAt,
},
}
}
func GetSetAlreadyWatchedAddressesArgs() []sdtypes.WatchAddressArg {
return []sdtypes.WatchAddressArg{
{
Address: Contract4Address,
CreatedAt: Contract4CreatedAt,
},
{
Address: Contract2Address,
CreatedAt: Contract2CreatedAt,
},
{
Address: Contract3Address,
CreatedAt: Contract3CreatedAt,
},
}
}


@ -1,263 +1,45 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test
import (
"bytes"
"context"
"fmt"
"os"
"sort"
"testing"
"github.com/ipfs/go-cid"
blockstore "github.com/ipfs/go-ipfs-blockstore"
dshelp "github.com/ipfs/go-ipfs-ds-help"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ethereum/go-ethereum/statediff/indexer/test_helpers"
)
var (
err error
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1 AND block_number = $2`
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte
nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte
mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block
headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid
trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid
nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid
rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte
nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte
state1CID, state2CID, storageCID cid.Cid
contract1Address, contract2Address, contract3Address, contract4Address string
contract1CreatedAt, contract2CreatedAt, contract3CreatedAt, contract4CreatedAt uint64
lastFilledAt, watchedAt1, watchedAt2, watchedAt3 uint64
)
func init() {
if os.Getenv("MODE") != "statediff" {
fmt.Println("Skipping statediff test")
os.Exit(0)
}
// canonical block at LondonBlock height
mockBlock = mocks.MockBlock
txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts
// non-canonical block at LondonBlock height
mockNonCanonicalBlock = mocks.MockNonCanonicalBlock
nonCanonicalBlockRcts := mocks.MockNonCanonicalBlockReceipts
// non-canonical block at LondonBlock height + 1
mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2
nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts
// encode mock receipts
buf := new(bytes.Buffer)
txs.EncodeIndex(0, buf)
tx1 = make([]byte, buf.Len())
copy(tx1, buf.Bytes())
buf.Reset()
txs.EncodeIndex(1, buf)
tx2 = make([]byte, buf.Len())
copy(tx2, buf.Bytes())
buf.Reset()
txs.EncodeIndex(2, buf)
tx3 = make([]byte, buf.Len())
copy(tx3, buf.Bytes())
buf.Reset()
txs.EncodeIndex(3, buf)
tx4 = make([]byte, buf.Len())
copy(tx4, buf.Bytes())
buf.Reset()
txs.EncodeIndex(4, buf)
tx5 = make([]byte, buf.Len())
copy(tx5, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(0, buf)
rct1 = make([]byte, buf.Len())
copy(rct1, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(1, buf)
rct2 = make([]byte, buf.Len())
copy(rct2, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(2, buf)
rct3 = make([]byte, buf.Len())
copy(rct3, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(3, buf)
rct4 = make([]byte, buf.Len())
copy(rct4, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(4, buf)
rct5 = make([]byte, buf.Len())
copy(rct5, buf.Bytes())
buf.Reset()
// encode mock receipts for non-canonical blocks
nonCanonicalBlockRcts.EncodeIndex(0, buf)
nonCanonicalBlockRct1 = make([]byte, buf.Len())
copy(nonCanonicalBlockRct1, buf.Bytes())
buf.Reset()
nonCanonicalBlockRcts.EncodeIndex(1, buf)
nonCanonicalBlockRct2 = make([]byte, buf.Len())
copy(nonCanonicalBlockRct2, buf.Bytes())
buf.Reset()
nonCanonicalBlock2Rcts.EncodeIndex(0, buf)
nonCanonicalBlock2Rct1 = make([]byte, buf.Len())
copy(nonCanonicalBlock2Rct1, buf.Bytes())
buf.Reset()
nonCanonicalBlock2Rcts.EncodeIndex(1, buf)
nonCanonicalBlock2Rct2 = make([]byte, buf.Len())
copy(nonCanonicalBlock2Rct2, buf.Bytes())
buf.Reset()
headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256)
mockNonCanonicalHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256)
mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256)
trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256)
trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256)
trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256)
trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256)
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
// create raw receipts
rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5})
rct1CID = rctleafNodeCids[0]
rct2CID = rctleafNodeCids[1]
rct3CID = rctleafNodeCids[2]
rct4CID = rctleafNodeCids[3]
rct5CID = rctleafNodeCids[4]
rctLeaf1 = rawRctLeafNodes[0]
rctLeaf2 = rawRctLeafNodes[1]
rctLeaf3 = rawRctLeafNodes[2]
rctLeaf4 = rawRctLeafNodes[3]
rctLeaf5 = rawRctLeafNodes[4]
// create raw receipts for non-canonical blocks
nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0]
nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1]
nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0]
nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1]
nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0]
nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1]
nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0]
nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1]
contract1Address = "0x5d663F5269090bD2A7DC2390c911dF6083D7b28F"
contract2Address = "0x6Eb7e5C66DB8af2E96159AC440cbc8CDB7fbD26B"
contract3Address = "0xcfeB164C328CA13EFd3C77E1980d94975aDfedfc"
contract4Address = "0x0Edf0c4f393a628DE4828B228C48175b3EA297fc"
contract1CreatedAt = uint64(1)
contract2CreatedAt = uint64(2)
contract3CreatedAt = uint64(3)
contract4CreatedAt = uint64(4)
lastFilledAt = uint64(0)
watchedAt1 = uint64(10)
watchedAt2 = uint64(15)
watchedAt3 = uint64(20)
}
// createRctTrie creates a receipt trie from the given raw receipts
// returns receipt leaf nodes and their CIDs
func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) {
receiptTrie := ipld.NewRctTrie()
for i, rct := range rcts {
receiptTrie.Add(i, rct)
}
rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
rlp.Decode(r, &idx)
rctleafNodeCids[idx] = rln.Cid()
orderedRctLeafNodes[idx] = rln.RawData()
}
return orderedRctLeafNodes, rctleafNodeCids
}
// createRctModel creates a models.ReceiptModel object from a given ethereum receipt
func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.ReceiptModel {
rctModel := models.ReceiptModel{
BlockNumber: blockNumber,
HeaderID: rct.BlockHash.String(),
TxID: rct.TxHash.String(),
LeafCID: cid.String(),
LeafMhKey: shared.MultihashKeyFromCID(cid),
LogRoot: rct.LogRoot.String(),
}
contract := shared.HandleZeroAddr(rct.ContractAddress)
rctModel.Contract = contract
if contract != "" {
rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
if len(rct.PostState) == 0 {
rctModel.PostStatus = rct.Status
} else {
rctModel.PostState = common.Bytes2Hex(rct.PostState)
}
return rctModel
}
func expectTrue(t *testing.T, value bool) {
if !value {
t.Fatalf("Assertion failed")
}
}
// setupTestData indexes a single mock block along with it's state nodes
// SetupTestData indexes a single mock block along with it's state nodes
func SetupTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
var tx interfaces.Batch
tx, err = ind.PushBlock(
@ -802,8 +584,8 @@ func TestPublishAndIndexStorageIPLDs(t *testing.T, db sql.Database) {
// and a non-canonical block at London height + 1
// along with their state nodes
func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) {
// index a canonical block at London height
var tx1 interfaces.Batch
tx1, err = ind.PushBlock(
mockBlock,
mocks.MockReceipts,
@ -826,6 +608,8 @@ func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) {
t.Fatal(err)
}
// index a non-canonical block at London height
// has transactions overlapping with that of the canonical block
var tx2 interfaces.Batch
tx2, err = ind.PushBlock(
mockNonCanonicalBlock,
@ -849,6 +633,8 @@ func SetupTestDataNonCanonical(t *testing.T, ind interfaces.StateDiffIndexer) {
t.Fatal(err)
}
// index a non-canonical block at London height + 1
// has transactions overlapping with that of the canonical block
var tx3 interfaces.Batch
tx3, err = ind.PushBlock(
mockNonCanonicalBlock2,
@ -1032,7 +818,7 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
},
}
// expected transactions in the canonical block at London height
// expected transactions in the non-canonical block at London height
mockNonCanonicalBlockTxs := mockNonCanonicalBlock.Transactions()
expectedNonCanonicalBlockTxs := []models.TxModel{
{
@ -1061,7 +847,7 @@ func TestPublishAndIndexTransactionsNonCanonical(t *testing.T, db sql.Database)
},
}
// expected transactions in the canonical block at London height + 1
// expected transactions in the non-canonical block at London height + 1
mockNonCanonicalBlock2Txs := mockNonCanonicalBlock2.Transactions()
expectedNonCanonicalBlock2Txs := []models.TxModel{
{
@ -1150,7 +936,7 @@ func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
expectedBlockRctsMap[rctCids[i].String()] = rctModel
}
// expected receipts in the canonical block at London height + 1
// expected receipts in the non-canonical block at London height
nonCanonicalBlockRctCids := []cid.Cid{nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID}
expectedNonCanonicalBlockRctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlockReceipts))
for i, mockNonCanonicalBlockRct := range mocks.MockNonCanonicalBlockReceipts {
@ -1158,7 +944,7 @@ func TestPublishAndIndexReceiptsNonCanonical(t *testing.T, db sql.Database) {
expectedNonCanonicalBlockRctsMap[nonCanonicalBlockRctCids[i].String()] = rctModel
}
// expected receipts in the canonical block at London height + 1
// expected receipts in the non-canonical block at London height + 1
nonCanonicalBlock2RctCids := []cid.Cid{nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID}
expectedNonCanonicalBlock2RctsMap := make(map[string]models.ReceiptModel, len(mocks.MockNonCanonicalBlock2Receipts))
for i, mockNonCanonicalBlock2Rct := range mocks.MockNonCanonicalBlock2Receipts {
@ -1251,7 +1037,7 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
})
}
// logs in the canonical block at London height + 1
// logs in the non-canonical block at London height
for _, mockBlockRct := range mocks.MockNonCanonicalBlockReceipts {
mockRcts = append(mockRcts, rctWithBlockHash{
mockBlockRct,
@ -1260,7 +1046,7 @@ func TestPublishAndIndexLogsNonCanonical(t *testing.T, db sql.Database) {
})
}
// logs in the canonical block at London height + 1
// logs in the non-canonical block at London height + 1
for _, mockBlockRct := range mocks.MockNonCanonicalBlock2Receipts {
mockRcts = append(mockRcts, rctWithBlockHash{
mockBlockRct,
@ -1407,7 +1193,7 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
removedNodeCID, _ := cid.Decode(shared.RemovedNodeStorageCID)
storageNodeCIDs := []cid.Cid{storageCID, removedNodeCID, removedNodeCID, removedNodeCID}
// expected state nodes in the canonical and the non-canonical block at London height
// expected storage nodes in the canonical and the non-canonical block at London height
expectedStorageNodes := make([]models.StorageNodeModel, 0)
storageNodeIndex := 0
for _, stateDiff := range mocks.StateDiffs {
@ -1432,7 +1218,7 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
}
})
// expected state nodes in the non-canonical block at London height + 1
// expected storage nodes in the non-canonical block at London height + 1
expectedNonCanonicalBlock2StorageNodes := make([]models.StorageNodeModel, 0)
storageNodeIndex = 0
for _, stateDiff := range mocks.StateDiffs[:2] {
@ -1486,91 +1272,3 @@ func TestPublishAndIndexStorageNonCanonical(t *testing.T, db sql.Database) {
require.Equal(t, expectedStorageNode, storageNodes[i])
}
}
var (
LegacyConfig = params.MainnetChainConfig
legacyData = mocks.NewLegacyData(LegacyConfig)
mockLegacyBlock *types.Block
legacyHeaderCID cid.Cid
)
func SetupLegacyTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
mockLegacyBlock = legacyData.MockBlock
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
var tx interfaces.Batch
tx, err = ind.PushBlock(
mockLegacyBlock,
legacyData.MockReceipts,
legacyData.MockBlock.Difficulty())
require.NoError(t, err)
defer func() {
if err := tx.Submit(err); err != nil {
t.Fatal(err)
}
}()
for _, node := range legacyData.StateDiffs {
err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String())
require.NoError(t, err)
}
if batchTx, ok := tx.(*sql.BatchTx); ok {
require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber)
} else if batchTx, ok := tx.(*file.BatchTx); ok {
require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber)
}
}
func TestLegacyIndexer(t *testing.T, db sql.Database) {
pgStr := `SELECT cid, td, reward, block_hash, coinbase
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
CID string
TD string
Reward string
BlockHash string `db:"block_hash"`
Coinbase string `db:"coinbase"`
}
header := new(res)
err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).Scan(
&header.CID,
&header.TD,
&header.Reward,
&header.BlockHash,
&header.Coinbase)
require.NoError(t, err)
require.Equal(t, legacyHeaderCID.String(), header.CID)
require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD)
require.Equal(t, "5000000000000011250", header.Reward)
require.Equal(t, legacyData.MockHeader.Coinbase.String(), header.Coinbase)
require.Nil(t, legacyData.MockHeader.BaseFee)
}
func TestBlock(t *testing.T, ind interfaces.StateDiffIndexer, testBlock *types.Block, testReceipts types.Receipts) {
var tx interfaces.Batch
tx, err = ind.PushBlock(
testBlock,
testReceipts,
testBlock.Difficulty())
require.NoError(t, err)
defer func() {
if err := tx.Submit(err); err != nil {
t.Fatal(err)
}
}()
for _, node := range mocks.StateDiffs {
err = ind.PushStateNode(tx, node, testBlock.Hash().String())
require.NoError(t, err)
}
if batchTx, ok := tx.(*sql.BatchTx); ok {
require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber)
} else if batchTx, ok := tx.(*file.BatchTx); ok {
require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber)
}
}

View File

@ -0,0 +1,248 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test
import (
"bytes"
"fmt"
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/ethereum/go-ethereum/statediff/indexer/models"
"github.com/ethereum/go-ethereum/statediff/indexer/shared"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
)
var (
err error
ipfsPgGet = `SELECT data FROM public.blocks
WHERE key = $1 AND block_number = $2`
watchedAddressesPgGet = `SELECT *
FROM eth_meta.watched_addresses`
tx1, tx2, tx3, tx4, tx5, rct1, rct2, rct3, rct4, rct5 []byte
nonCanonicalBlockRct1, nonCanonicalBlockRct2 []byte
nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2 []byte
mockBlock, mockNonCanonicalBlock, mockNonCanonicalBlock2 *types.Block
headerCID, mockNonCanonicalHeaderCID, mockNonCanonicalHeader2CID cid.Cid
trx1CID, trx2CID, trx3CID, trx4CID, trx5CID cid.Cid
rct1CID, rct2CID, rct3CID, rct4CID, rct5CID cid.Cid
nonCanonicalBlockRct1CID, nonCanonicalBlockRct2CID cid.Cid
nonCanonicalBlock2Rct1CID, nonCanonicalBlock2Rct2CID cid.Cid
rctLeaf1, rctLeaf2, rctLeaf3, rctLeaf4, rctLeaf5 []byte
nonCanonicalBlockRctLeaf1, nonCanonicalBlockRctLeaf2 []byte
nonCanonicalBlock2RctLeaf1, nonCanonicalBlock2RctLeaf2 []byte
state1CID, state2CID, storageCID cid.Cid
)
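// init encodes the mock transactions and receipts and derives the CIDs and
// receipt trie leaf nodes used by the tests in this package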
func init() {
if os.Getenv("MODE") != "statediff" {
fmt.Println("Skipping statediff test")
os.Exit(0)
}
// canonical block at LondonBlock height
mockBlock = mocks.MockBlock
txs, rcts := mocks.MockBlock.Transactions(), mocks.MockReceipts
// non-canonical block at LondonBlock height
mockNonCanonicalBlock = mocks.MockNonCanonicalBlock
nonCanonicalBlockRcts := mocks.MockNonCanonicalBlockReceipts
// non-canonical block at LondonBlock height + 1
mockNonCanonicalBlock2 = mocks.MockNonCanonicalBlock2
nonCanonicalBlock2Rcts := mocks.MockNonCanonicalBlock2Receipts
	// encode mock transactions and receipts
buf := new(bytes.Buffer)
txs.EncodeIndex(0, buf)
tx1 = make([]byte, buf.Len())
copy(tx1, buf.Bytes())
buf.Reset()
txs.EncodeIndex(1, buf)
tx2 = make([]byte, buf.Len())
copy(tx2, buf.Bytes())
buf.Reset()
txs.EncodeIndex(2, buf)
tx3 = make([]byte, buf.Len())
copy(tx3, buf.Bytes())
buf.Reset()
txs.EncodeIndex(3, buf)
tx4 = make([]byte, buf.Len())
copy(tx4, buf.Bytes())
buf.Reset()
txs.EncodeIndex(4, buf)
tx5 = make([]byte, buf.Len())
copy(tx5, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(0, buf)
rct1 = make([]byte, buf.Len())
copy(rct1, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(1, buf)
rct2 = make([]byte, buf.Len())
copy(rct2, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(2, buf)
rct3 = make([]byte, buf.Len())
copy(rct3, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(3, buf)
rct4 = make([]byte, buf.Len())
copy(rct4, buf.Bytes())
buf.Reset()
rcts.EncodeIndex(4, buf)
rct5 = make([]byte, buf.Len())
copy(rct5, buf.Bytes())
buf.Reset()
// encode mock receipts for non-canonical blocks
nonCanonicalBlockRcts.EncodeIndex(0, buf)
nonCanonicalBlockRct1 = make([]byte, buf.Len())
copy(nonCanonicalBlockRct1, buf.Bytes())
buf.Reset()
nonCanonicalBlockRcts.EncodeIndex(1, buf)
nonCanonicalBlockRct2 = make([]byte, buf.Len())
copy(nonCanonicalBlockRct2, buf.Bytes())
buf.Reset()
nonCanonicalBlock2Rcts.EncodeIndex(0, buf)
nonCanonicalBlock2Rct1 = make([]byte, buf.Len())
copy(nonCanonicalBlock2Rct1, buf.Bytes())
buf.Reset()
nonCanonicalBlock2Rcts.EncodeIndex(1, buf)
nonCanonicalBlock2Rct2 = make([]byte, buf.Len())
copy(nonCanonicalBlock2Rct2, buf.Bytes())
buf.Reset()
headerCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockHeaderRlp, multihash.KECCAK_256)
mockNonCanonicalHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeaderRlp, multihash.KECCAK_256)
mockNonCanonicalHeader2CID, _ = ipld.RawdataToCid(ipld.MEthHeader, mocks.MockNonCanonicalHeader2Rlp, multihash.KECCAK_256)
trx1CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx1, multihash.KECCAK_256)
trx2CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx2, multihash.KECCAK_256)
trx3CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx3, multihash.KECCAK_256)
trx4CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx4, multihash.KECCAK_256)
trx5CID, _ = ipld.RawdataToCid(ipld.MEthTx, tx5, multihash.KECCAK_256)
state1CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.ContractLeafNode, multihash.KECCAK_256)
state2CID, _ = ipld.RawdataToCid(ipld.MEthStateTrie, mocks.AccountLeafNode, multihash.KECCAK_256)
storageCID, _ = ipld.RawdataToCid(ipld.MEthStorageTrie, mocks.StorageLeafNode, multihash.KECCAK_256)
	// create receipt trie leaf nodes and CIDs for the canonical block receipts
rawRctLeafNodes, rctleafNodeCids := createRctTrie([][]byte{rct1, rct2, rct3, rct4, rct5})
rct1CID = rctleafNodeCids[0]
rct2CID = rctleafNodeCids[1]
rct3CID = rctleafNodeCids[2]
rct4CID = rctleafNodeCids[3]
rct5CID = rctleafNodeCids[4]
rctLeaf1 = rawRctLeafNodes[0]
rctLeaf2 = rawRctLeafNodes[1]
rctLeaf3 = rawRctLeafNodes[2]
rctLeaf4 = rawRctLeafNodes[3]
rctLeaf5 = rawRctLeafNodes[4]
	// create receipt trie leaf nodes and CIDs for the non-canonical block receipts
nonCanonicalBlockRawRctLeafNodes, nonCanonicalBlockRctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlockRct1, nonCanonicalBlockRct2})
nonCanonicalBlockRct1CID = nonCanonicalBlockRctLeafNodeCids[0]
nonCanonicalBlockRct2CID = nonCanonicalBlockRctLeafNodeCids[1]
nonCanonicalBlockRctLeaf1 = nonCanonicalBlockRawRctLeafNodes[0]
nonCanonicalBlockRctLeaf2 = nonCanonicalBlockRawRctLeafNodes[1]
	nonCanonicalBlock2RawRctLeafNodes, nonCanonicalBlock2RctLeafNodeCids := createRctTrie([][]byte{nonCanonicalBlock2Rct1, nonCanonicalBlock2Rct2})
nonCanonicalBlock2Rct1CID = nonCanonicalBlock2RctLeafNodeCids[0]
nonCanonicalBlock2Rct2CID = nonCanonicalBlock2RctLeafNodeCids[1]
nonCanonicalBlock2RctLeaf1 = nonCanonicalBlock2RawRctLeafNodes[0]
nonCanonicalBlock2RctLeaf2 = nonCanonicalBlock2RawRctLeafNodes[1]
}
// createRctTrie creates a receipt trie from the given raw receipts
// returns receipt leaf nodes and their CIDs
func createRctTrie(rcts [][]byte) ([][]byte, []cid.Cid) {
receiptTrie := ipld.NewRctTrie()
for i, rct := range rcts {
receiptTrie.Add(i, rct)
}
rctLeafNodes, keys, _ := receiptTrie.GetLeafNodes()
rctleafNodeCids := make([]cid.Cid, len(rctLeafNodes))
orderedRctLeafNodes := make([][]byte, len(rctLeafNodes))
for i, rln := range rctLeafNodes {
var idx uint
r := bytes.NewReader(keys[i].TrieKey)
rlp.Decode(r, &idx)
rctleafNodeCids[idx] = rln.Cid()
orderedRctLeafNodes[idx] = rln.RawData()
}
return orderedRctLeafNodes, rctleafNodeCids
}
// createRctModel creates a models.ReceiptModel object from a given Ethereum receipt
func createRctModel(rct *types.Receipt, cid cid.Cid, blockNumber string) models.ReceiptModel {
rctModel := models.ReceiptModel{
BlockNumber: blockNumber,
HeaderID: rct.BlockHash.String(),
TxID: rct.TxHash.String(),
LeafCID: cid.String(),
LeafMhKey: shared.MultihashKeyFromCID(cid),
LogRoot: rct.LogRoot.String(),
}
contract := shared.HandleZeroAddr(rct.ContractAddress)
rctModel.Contract = contract
if contract != "" {
rctModel.ContractHash = crypto.Keccak256Hash(common.HexToAddress(contract).Bytes()).String()
}
if len(rct.PostState) == 0 {
rctModel.PostStatus = rct.Status
} else {
rctModel.PostState = common.Bytes2Hex(rct.PostState)
}
return rctModel
}
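// expectTrue fails the test if the given value is false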
func expectTrue(t *testing.T, value bool) {
if !value {
t.Fatalf("Assertion failed")
}
}

View File

@ -0,0 +1,96 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test
import (
"context"
"testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/ipld"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/ipfs/go-cid"
"github.com/multiformats/go-multihash"
"github.com/stretchr/testify/require"
)
var (
LegacyConfig = params.MainnetChainConfig
legacyData = mocks.NewLegacyData(LegacyConfig)
mockLegacyBlock *types.Block
legacyHeaderCID cid.Cid
)
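// SetupLegacyTestData indexes a mock legacy block along with its state nodes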
func SetupLegacyTestData(t *testing.T, ind interfaces.StateDiffIndexer) {
mockLegacyBlock = legacyData.MockBlock
legacyHeaderCID, _ = ipld.RawdataToCid(ipld.MEthHeader, legacyData.MockHeaderRlp, multihash.KECCAK_256)
var tx interfaces.Batch
tx, err = ind.PushBlock(
mockLegacyBlock,
legacyData.MockReceipts,
legacyData.MockBlock.Difficulty())
require.NoError(t, err)
defer func() {
if err := tx.Submit(err); err != nil {
t.Fatal(err)
}
}()
for _, node := range legacyData.StateDiffs {
err = ind.PushStateNode(tx, node, mockLegacyBlock.Hash().String())
require.NoError(t, err)
}
if batchTx, ok := tx.(*sql.BatchTx); ok {
require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber)
} else if batchTx, ok := tx.(*file.BatchTx); ok {
require.Equal(t, legacyData.BlockNumber.String(), batchTx.BlockNumber)
}
}
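// TestLegacyIndexer checks that the legacy header was indexed with the expected CID, total difficulty, reward and coinbase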
func TestLegacyIndexer(t *testing.T, db sql.Database) {
pgStr := `SELECT cid, td, reward, block_hash, coinbase
FROM eth.header_cids
WHERE block_number = $1`
// check header was properly indexed
type res struct {
CID string
TD string
Reward string
BlockHash string `db:"block_hash"`
Coinbase string `db:"coinbase"`
}
header := new(res)
err = db.QueryRow(context.Background(), pgStr, legacyData.BlockNumber.Uint64()).Scan(
&header.CID,
&header.TD,
&header.Reward,
&header.BlockHash,
&header.Coinbase)
require.NoError(t, err)
require.Equal(t, legacyHeaderCID.String(), header.CID)
require.Equal(t, legacyData.MockBlock.Difficulty().String(), header.TD)
require.Equal(t, "5000000000000011250", header.Reward)
require.Equal(t, legacyData.MockHeader.Coinbase.String(), header.Coinbase)
require.Nil(t, legacyData.MockHeader.BaseFee)
}

View File

@ -0,0 +1,53 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test
import (
"testing"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/statediff/indexer/database/file"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/stretchr/testify/require"
)
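// TestBlock pushes the given block, receipts and the mock state nodes to the indexer
// and checks the block number recorded on the batch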
func TestBlock(t *testing.T, ind interfaces.StateDiffIndexer, testBlock *types.Block, testReceipts types.Receipts) {
var tx interfaces.Batch
tx, err = ind.PushBlock(
testBlock,
testReceipts,
testBlock.Difficulty())
require.NoError(t, err)
defer func() {
if err := tx.Submit(err); err != nil {
t.Fatal(err)
}
}()
for _, node := range mocks.StateDiffs {
err = ind.PushStateNode(tx, node, testBlock.Hash().String())
require.NoError(t, err)
}
if batchTx, ok := tx.(*sql.BatchTx); ok {
require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber)
} else if batchTx, ok := tx.(*file.BatchTx); ok {
require.Equal(t, testBlock.Number().String(), batchTx.BlockNumber)
}
}

View File

@ -0,0 +1,258 @@
// VulcanizeDB
// Copyright © 2022 Vulcanize
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package test
import (
"context"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/statediff/indexer/database/sql"
"github.com/ethereum/go-ethereum/statediff/indexer/interfaces"
"github.com/ethereum/go-ethereum/statediff/indexer/mocks"
"github.com/stretchr/testify/require"
)
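// res represents a row in the eth_meta.watched_addresses table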
type res struct {
Address string `db:"address"`
CreatedAt uint64 `db:"created_at"`
WatchedAt uint64 `db:"watched_at"`
LastFilledAt uint64 `db:"last_filled_at"`
}
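// TestLoadEmptyWatchedAddresses checks that LoadWatchedAddresses returns no addresses when the watched addresses table is empty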
func TestLoadEmptyWatchedAddresses(t *testing.T, ind interfaces.StateDiffIndexer) {
expectedData := []common.Address{}
rows, err := ind.LoadWatchedAddresses()
require.NoError(t, err)
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
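// TestInsertWatchedAddresses checks the rows in eth_meta.watched_addresses after an initial set of addresses has been inserted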
func TestInsertWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{
{
Address: mocks.Contract1Address,
CreatedAt: mocks.Contract1CreatedAt,
WatchedAt: mocks.WatchedAt1,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract2Address,
CreatedAt: mocks.Contract2CreatedAt,
WatchedAt: mocks.WatchedAt1,
LastFilledAt: mocks.LastFilledAt,
},
}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
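// TestInsertAlreadyWatchedAddresses checks the rows in eth_meta.watched_addresses after inserting addresses, some of which are already watched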
func TestInsertAlreadyWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{
{
Address: mocks.Contract1Address,
CreatedAt: mocks.Contract1CreatedAt,
WatchedAt: mocks.WatchedAt1,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract2Address,
CreatedAt: mocks.Contract2CreatedAt,
WatchedAt: mocks.WatchedAt1,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract3Address,
CreatedAt: mocks.Contract3CreatedAt,
WatchedAt: mocks.WatchedAt2,
LastFilledAt: mocks.LastFilledAt,
},
}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
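// TestRemoveWatchedAddresses checks the rows in eth_meta.watched_addresses after some watched addresses have been removed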
func TestRemoveWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{
{
Address: mocks.Contract1Address,
CreatedAt: mocks.Contract1CreatedAt,
WatchedAt: mocks.WatchedAt1,
LastFilledAt: mocks.LastFilledAt,
},
}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
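// TestRemoveNonWatchedAddresses checks that the watched addresses table is empty after removing addresses, including some that were not being watched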
func TestRemoveNonWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
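// TestSetWatchedAddresses checks the rows in eth_meta.watched_addresses after the watched addresses list has been set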
func TestSetWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{
{
Address: mocks.Contract1Address,
CreatedAt: mocks.Contract1CreatedAt,
WatchedAt: mocks.WatchedAt2,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract2Address,
CreatedAt: mocks.Contract2CreatedAt,
WatchedAt: mocks.WatchedAt2,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract3Address,
CreatedAt: mocks.Contract3CreatedAt,
WatchedAt: mocks.WatchedAt2,
LastFilledAt: mocks.LastFilledAt,
},
}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
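// TestSetAlreadyWatchedAddresses checks the rows in eth_meta.watched_addresses after setting a list that includes already watched addresses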
func TestSetAlreadyWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{
{
Address: mocks.Contract4Address,
CreatedAt: mocks.Contract4CreatedAt,
WatchedAt: mocks.WatchedAt3,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract2Address,
CreatedAt: mocks.Contract2CreatedAt,
WatchedAt: mocks.WatchedAt3,
LastFilledAt: mocks.LastFilledAt,
},
{
Address: mocks.Contract3Address,
CreatedAt: mocks.Contract3CreatedAt,
WatchedAt: mocks.WatchedAt3,
LastFilledAt: mocks.LastFilledAt,
},
}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
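// TestLoadWatchedAddresses checks that LoadWatchedAddresses returns the currently watched addresses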
func TestLoadWatchedAddresses(t *testing.T, ind interfaces.StateDiffIndexer) {
expectedData := []common.Address{
common.HexToAddress(mocks.Contract4Address),
common.HexToAddress(mocks.Contract2Address),
common.HexToAddress(mocks.Contract3Address),
}
rows, err := ind.LoadWatchedAddresses()
require.NoError(t, err)
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
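// TestClearWatchedAddresses checks that the watched addresses table is empty after the watched addresses have been cleared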
func TestClearWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}
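// TestClearEmptyWatchedAddresses checks that clearing an already empty watched addresses table leaves it empty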
func TestClearEmptyWatchedAddresses(t *testing.T, db sql.Database) {
expectedData := []res{}
rows := []res{}
err = db.Select(context.Background(), &rows, watchedAddressesPgGet)
if err != nil {
t.Fatal(err)
}
require.Equal(t, len(expectedData), len(rows))
for idx, row := range rows {
require.Equal(t, expectedData[idx], row)
}
}

View File

@ -35,7 +35,7 @@ func ListContainsString(sss []string, s string) bool {
return false
}
// DedupFile removes the duplicates from the given file
// DedupFile removes duplicates from the given file
func DedupFile(filePath string) error {
f, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDONLY, os.ModePerm)
if err != nil {