swarm/storage: Fix race in TestLDBStoreCollectGarbage. Disable testLDBStoreRemoveThenCollectGarbage (#18512)
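The data race referenced in the title comes from the test goroutine reading ldb.entryCnt and ldb.accessCnt for a log.Debug call while the garbage-collection goroutine updates those counters under ldb.lock; the fix takes the read lock around those reads. Below is a minimal, self-contained sketch of that pattern, with a hypothetical store type standing in for LDBStore (not the actual swarm/storage API):

package main

import (
	"fmt"
	"sync"
)

// store is a stand-in for LDBStore: a counter written by a background
// GC goroutine under a write lock.
type store struct {
	lock     sync.RWMutex
	entryCnt uint64
}

// gcRound models the GC goroutine decrementing the counter under the write lock.
func (s *store) gcRound() {
	s.lock.Lock()
	s.entryCnt--
	s.lock.Unlock()
}

// logCounts models the test's log.Debug call; without the RLock/RUnlock pair
// the read of entryCnt races with gcRound and `go test -race` flags it.
func (s *store) logCounts() {
	s.lock.RLock()
	fmt.Println("entrycnt", s.entryCnt)
	s.lock.RUnlock()
}

func main() {
	s := &store{entryCnt: 100}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); s.gcRound() }()
	go func() { defer wg.Done(); s.logCounts() }()
	wg.Wait()
}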
parent 6f45fa66d8
commit 3591fc603f
@@ -26,7 +26,6 @@ import (
 	"strconv"
 	"strings"
 	"testing"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	ch "github.com/ethereum/go-ethereum/swarm/chunk"
@@ -388,11 +387,11 @@ func testLDBStoreCollectGarbage(t *testing.T) {
 			t.Fatal(err.Error())
 		}
 		allChunks = append(allChunks, chunks...)
+		ldb.lock.RLock()
 		log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n)
+		ldb.lock.RUnlock()
 
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-		defer cancel()
-		waitGc(ctx, ldb)
+		waitGc(ldb)
 	}
 
 	// attempt gets on all put chunks
@@ -466,6 +465,7 @@ func TestLDBStoreAddRemove(t *testing.T) {
 }
 
 func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
+	t.Skip("flaky with -race flag")
 
 	params := strings.Split(t.Name(), "/")
 	capacity, err := strconv.Atoi(params[2])
@@ -496,9 +496,7 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 		}
 	}
 
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	waitGc(ctx, ldb)
+	waitGc(ldb)
 
 	// delete all chunks
 	// (only count the ones actually deleted, the rest will have been gc'd)
@@ -537,14 +535,14 @@ func testLDBStoreRemoveThenCollectGarbage(t *testing.T) {
 		remaining -= putCount
 		for putCount > 0 {
 			ldb.Put(context.TODO(), chunks[puts])
+			ldb.lock.RLock()
 			log.Debug("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt, "cap", capacity, "n", n, "puts", puts, "remaining", remaining, "roundtarget", roundTarget)
+			ldb.lock.RUnlock()
 			puts++
 			putCount--
 		}
 
-		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-		defer cancel()
-		waitGc(ctx, ldb)
+		waitGc(ldb)
 	}
 
 	// expect first surplus chunks to be missing, because they have the smallest access value
@@ -597,9 +595,7 @@ func TestLDBStoreCollectGarbageAccessUnlikeIndex(t *testing.T) {
 	}
 
 	// wait for garbage collection to kick in on the responsible actor
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
-	defer cancel()
-	waitGc(ctx, ldb)
+	waitGc(ldb)
 
 	var missing int
 	for i, ch := range chunks[2 : capacity/2] {
@@ -788,7 +784,10 @@ func TestCleanIndex(t *testing.T) {
 	}
 }
 
-func waitGc(ctx context.Context, ldb *LDBStore) {
+// Note: waitGc does not guarantee that we wait 1 GC round; it only
+// guarantees that if the GC is running we wait for that run to finish
+// ticket: https://github.com/ethersphere/go-ethereum/issues/1151
+func waitGc(ldb *LDBStore) {
 	<-ldb.gc.runC
 	ldb.gc.runC <- struct{}{}
 }
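The rewritten waitGc relies on the GC worker holding the token on ldb.gc.runC for the duration of a round: receiving the token and immediately handing it back therefore blocks only while a round is already in flight, which is exactly the limitation the new comment spells out. A self-contained sketch of that handshake, under the assumption that the worker drains and refills a one-slot channel around each round (gcWorker and runRound are hypothetical names, not the swarm/storage implementation):

package main

import (
	"fmt"
	"time"
)

// gcWorker holds a one-slot token channel; the token sits in runC whenever
// no GC round is running.
type gcWorker struct {
	runC chan struct{}
}

func newGCWorker() *gcWorker {
	g := &gcWorker{runC: make(chan struct{}, 1)}
	g.runC <- struct{}{} // start idle: token available
	return g
}

// runRound takes the token while one GC round is in flight and returns it
// when the round is done.
func (g *gcWorker) runRound() {
	<-g.runC
	time.Sleep(50 * time.Millisecond) // stand-in for the actual collection work
	g.runC <- struct{}{}
}

// waitGc mirrors the test helper: if a round is running it blocks until that
// run finishes; if none is running it returns immediately. It does not wait
// for a future round to start.
func waitGc(g *gcWorker) {
	<-g.runC
	g.runC <- struct{}{}
}

func main() {
	g := newGCWorker()
	go g.runRound()
	time.Sleep(10 * time.Millisecond) // give the round a chance to start
	waitGc(g)
	fmt.Println("no GC round in flight")
}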