core/rawdb: fsync head data file before closing it (#26490)

This PR fixes an issue which might result in data loss in the freezer.

Whenever a mutation happens in the freezer, all data is written into the head data file,
and that file is rotated out for a fresh one once its size reaches the threshold.

The rotated-out data file should be fsync'd before it is closed, otherwise its contents
can still be lost on a crash. The freezer.Sync function only fsyncs (1) the index file,
(2) the meta file and (3) the current head data file, so the retiring data file is never
explicitly synced. This PR therefore forcibly fsyncs the head data file before it is
rotated out, i.e. whenever a mutation lands on a data-file boundary.
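
To make the failure window concrete, here is a minimal, self-contained Go sketch of the
pattern this fix applies. Everything in it (rotate, appendBlob, maxFileSize, the data.000N
file names) is hypothetical and not the freezer's actual API; the only point it illustrates
is syncing the outgoing file before closing it when an append crosses the size boundary.

package main

import (
	"fmt"
	"os"
)

// maxFileSize is a hypothetical rotation threshold standing in for the
// freezer's per-file size limit.
const maxFileSize = 2 * 1024 * 1024

// rotate syncs the outgoing data file to stable storage, closes it, and
// opens a fresh file for further appends.
func rotate(cur *os.File, nextName string) (*os.File, error) {
	// The crucial step: flush OS-buffered writes of the old file to disk
	// before it is closed and the table moves on to a new head file.
	if err := cur.Sync(); err != nil {
		return nil, err
	}
	if err := cur.Close(); err != nil {
		return nil, err
	}
	return os.OpenFile(nextName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
}

// appendBlob appends one blob to the current data file and rotates the file
// when the write pushes it past the size threshold.
func appendBlob(cur *os.File, blob []byte, nextName string) (*os.File, error) {
	if _, err := cur.Write(blob); err != nil {
		return nil, err
	}
	stat, err := cur.Stat()
	if err != nil {
		return nil, err
	}
	if stat.Size() >= maxFileSize {
		// The mutation landed on the data-file boundary: sync, then rotate.
		return rotate(cur, nextName)
	}
	return cur, nil
}

func main() {
	f, err := os.OpenFile("data.0001", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		panic(err)
	}
	if f, err = appendBlob(f, make([]byte, 4096), "data.0002"); err != nil {
		panic(err)
	}
	fmt.Println("appended; current data file:", f.Name())
	f.Close()
}
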
rjl493456442 2023-01-13 18:55:50 +08:00 committed by GitHub
parent a21e963ac2
commit e04d63ebd3
3 changed files with 8 additions and 5 deletions

core/rawdb/chain_freezer.go

@@ -86,14 +86,14 @@ func (f *chainFreezer) Close() error {
 // This functionality is deliberately broken off from block importing to avoid
 // incurring additional data shuffling delays on block propagation.
 func (f *chainFreezer) freeze(db ethdb.KeyValueStore) {
-	nfdb := &nofreezedb{KeyValueStore: db}
 	var (
 		backoff   bool
 		triggered chan struct{} // Used in tests
+		nfdb      = &nofreezedb{KeyValueStore: db}
 	)
 	timer := time.NewTimer(freezerRecheckInterval)
 	defer timer.Stop()
 	for {
 		select {
 		case <-f.quit:
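
(Note, not part of the original commit message: this hunk appears to be a cosmetic cleanup
bundled with the fix, merely moving the nfdb declaration into the surrounding var block;
the fsync change itself lives in freezer_table.go below.)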

core/rawdb/freezer_table.go

@@ -852,8 +852,11 @@ func (t *freezerTable) advanceHead() error {
 	if err != nil {
 		return err
 	}
-	// Close old file, and reopen in RDONLY mode.
+	// Commit the contents of the old file to stable storage and
+	// tear it down. It will be re-opened in read-only mode.
+	if err := t.head.Sync(); err != nil {
+		return err
+	}
 	t.releaseFile(t.headId)
 	t.openFile(t.headId, openFreezerFileForReadOnly)
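
For context, below is a hedged sketch of what the commit message says freezer.Sync covers.
The names (table, index, meta, head) are stand-ins rather than geth's real freezerTable
internals; the sketch only restates that the periodic sync touches the index file, the meta
file and the current head data file, which is why the retiring head file needs the explicit
Sync added above.

package main

import (
	"fmt"
	"os"
)

// table is a stand-in for the freezer table the commit message describes;
// the type and field names here are illustrative, not geth's real internals.
type table struct {
	index, meta, head *os.File
}

// Sync flushes only the files the commit message lists: the index file, the
// meta file and the current head data file. A data file that has already been
// rotated out is not in this set, which is why advanceHead now syncs it
// explicitly before closing it.
func (t *table) Sync() error {
	if err := t.index.Sync(); err != nil {
		return err
	}
	if err := t.meta.Sync(); err != nil {
		return err
	}
	return t.head.Sync()
}

func main() {
	open := func(pattern string) *os.File {
		f, err := os.CreateTemp("", pattern)
		if err != nil {
			panic(err)
		}
		return f
	}
	t := &table{index: open("index"), meta: open("meta"), head: open("head")}
	if err := t.Sync(); err != nil {
		panic(err)
	}
	fmt.Println("index, meta and head data file synced")
}

Once a data file has been rotated out and re-opened read-only it is no longer written
through the append path, so syncing it once at rotation time is presumably sufficient.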

core/rawdb/freezer_test.go

@@ -190,7 +190,7 @@ func TestFreezerConcurrentModifyTruncate(t *testing.T) {
 	var item = make([]byte, 256)
-	for i := 0; i < 1000; i++ {
+	for i := 0; i < 10; i++ {
 		// First reset and write 100 items.
 		if err := f.TruncateHead(0); err != nil {
 			t.Fatal("truncate failed:", err)
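
(Hedged note, not part of the original diff: the iteration count is presumably reduced from
1000 to 10 because every head-file rotation now performs an extra fsync, making each
iteration of this concurrency test noticeably more expensive.)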