Merge pull request #5750 from filecoin-project/fix/splitstore-tests

test: attempt to make the splitstore test deterministic
Łukasz Magiera 2021-03-09 12:47:21 +01:00 committed by GitHub
commit 2642ddcf50

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"sync"
+	"sync/atomic"
 	"testing"
 	"time"
@@ -14,6 +15,7 @@ import (
 	cid "github.com/ipfs/go-cid"
 	datastore "github.com/ipfs/go-datastore"
+	dssync "github.com/ipfs/go-datastore/sync"
 	logging "github.com/ipfs/go-log/v2"
 )
@@ -25,8 +27,6 @@ func init() {
 }
 
 func testSplitStore(t *testing.T, cfg *Config) {
-	t.Helper()
-
 	chain := &mockChain{}
 	// genesis
 	genBlock := mock.MkBlock(nil, 0, 0)
@@ -34,7 +34,7 @@ func testSplitStore(t *testing.T, cfg *Config) {
 	chain.push(genTs)
 
 	// the myriads of stores
-	ds := datastore.NewMapDatastore()
+	ds := dssync.MutexWrap(datastore.NewMapDatastore())
 	hot := blockstore.NewMemorySync()
 	cold := blockstore.NewMemorySync()
 
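Why the MutexWrap matters: the background compaction goroutine and the test body now share the metadata datastore, and a bare MapDatastore is not safe for concurrent use. A minimal sketch of the wrapper, assuming a recent go-datastore (v0.5+, where operations take a context; the key and value here are made up for illustration):

    package main

    import (
        "context"
        "fmt"

        datastore "github.com/ipfs/go-datastore"
        dssync "github.com/ipfs/go-datastore/sync"
    )

    func main() {
        ctx := context.Background()

        // MutexWrap guards every datastore operation with a mutex, so a
        // background goroutine and the test can share the same in-memory
        // map without racing.
        ds := dssync.MutexWrap(datastore.NewMapDatastore())

        key := datastore.NewKey("/base") // hypothetical key
        if err := ds.Put(ctx, key, []byte("epoch")); err != nil {
            panic(err)
        }
        v, _ := ds.Get(ctx, key)
        fmt.Printf("%s = %s\n", key, v)
    }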
@@ -90,9 +90,16 @@ func testSplitStore(t *testing.T, cfg *Config) {
 		}
 	}
 
+	waitForCompaction := func() {
+		for atomic.LoadInt32(&ss.compacting) == 1 {
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+
 	curTs := genTs
 	for i := 1; i < 5; i++ {
 		curTs = mkBlock(curTs, i)
+		waitForCompaction()
 	}
 
 	mkGarbageBlock(genTs, 1)
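The waitForCompaction helper above is the core of the determinism fix: instead of sleeping a fixed second and hoping compaction has finished, the test polls the splitstore's compacting flag until the background goroutine clears it. A self-contained sketch of the same pattern with a stand-in flag (ss.compacting itself is unexported):

    package main

    import (
        "fmt"
        "sync/atomic"
        "time"
    )

    func main() {
        var compacting int32 // stand-in for ss.compacting

        // The worker raises the flag while it runs, as the splitstore
        // does around a compaction cycle, and clears it when done.
        atomic.StoreInt32(&compacting, 1)
        go func() {
            time.Sleep(300 * time.Millisecond) // simulated compaction work
            atomic.StoreInt32(&compacting, 0)
        }()

        // Poll until the flag clears: the wait lasts as long as the work
        // takes (plus at most one poll interval), not a guessed second.
        for atomic.LoadInt32(&compacting) == 1 {
            time.Sleep(100 * time.Millisecond)
        }
        fmt.Println("compaction done; store counts are now stable")
    }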
@@ -117,17 +124,17 @@ func testSplitStore(t *testing.T, cfg *Config) {
 	hotCnt := countBlocks(hot)
 
 	if coldCnt != 1 {
-		t.Fatalf("expected %d blocks, but got %d", 1, coldCnt)
+		t.Errorf("expected %d blocks, but got %d", 1, coldCnt)
 	}
 
-	if hotCnt != 4 {
-		t.Fatalf("expected %d blocks, but got %d", 4, hotCnt)
+	if hotCnt != 5 {
+		t.Errorf("expected %d blocks, but got %d", 5, hotCnt)
 	}
 
 	// trigger a compaction
 	for i := 5; i < 10; i++ {
 		curTs = mkBlock(curTs, i)
-		time.Sleep(time.Second)
+		waitForCompaction()
 	}
 
 	coldCnt = countBlocks(cold)
@@ -135,31 +142,31 @@ func testSplitStore(t *testing.T, cfg *Config) {
 	if !cfg.EnableFullCompaction {
 		if coldCnt != 5 {
-			t.Fatalf("expected %d cold blocks, but got %d", 5, coldCnt)
+			t.Errorf("expected %d cold blocks, but got %d", 5, coldCnt)
 		}
 
 		if hotCnt != 5 {
-			t.Fatalf("expected %d hot blocks, but got %d", 5, hotCnt)
+			t.Errorf("expected %d hot blocks, but got %d", 5, hotCnt)
 		}
 	}
 
 	if cfg.EnableFullCompaction && !cfg.EnableGC {
 		if coldCnt != 3 {
-			t.Fatalf("expected %d cold blocks, but got %d", 3, coldCnt)
+			t.Errorf("expected %d cold blocks, but got %d", 3, coldCnt)
 		}
 
 		if hotCnt != 7 {
-			t.Fatalf("expected %d hot blocks, but got %d", 7, hotCnt)
+			t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt)
 		}
 	}
 
 	if cfg.EnableFullCompaction && cfg.EnableGC {
 		if coldCnt != 2 {
-			t.Fatalf("expected %d cold blocks, but got %d", 2, coldCnt)
+			t.Errorf("expected %d cold blocks, but got %d", 2, coldCnt)
 		}
 
 		if hotCnt != 7 {
-			t.Fatalf("expected %d hot blocks, but got %d", 7, hotCnt)
+			t.Errorf("expected %d hot blocks, but got %d", 7, hotCnt)
 		}
 	}
 }
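The patch also downgrades t.Fatalf to t.Errorf throughout the count checks. Fatalf aborts the test at the first mismatch; Errorf records the failure and keeps going, so a single run reports every store count that is off rather than only the first. A tiny runnable illustration, with hypothetical counts chosen to fail both checks:

    package example

    import "testing"

    // Hypothetical counts, for illustration only.
    var hotCnt, coldCnt = 4, 0

    func TestCounts(t *testing.T) {
        // With t.Fatalf the test would stop at the first mismatch and
        // never report the hot count; Errorf surfaces both.
        if coldCnt != 1 {
            t.Errorf("expected %d blocks, but got %d", 1, coldCnt)
        }
        if hotCnt != 5 {
            t.Errorf("expected %d blocks, but got %d", 5, hotCnt)
        }
    }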