forked from cerc-io/plugeth
consensus/ethash: use atomic type (#27068)
This commit is contained in:
parent b1972627d9
commit 3768b00747
@@ -163,7 +163,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	rows := int(size) / hashBytes
 
 	// Start a monitoring goroutine to report progress on low end devices
-	var progress uint32
+	var progress atomic.Uint32
 
 	done := make(chan struct{})
 	defer close(done)
@@ -174,7 +174,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			case <-done:
 				return
 			case <-time.After(3 * time.Second):
-				logger.Info("Generating ethash verification cache", "percentage", atomic.LoadUint32(&progress)*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
+				logger.Info("Generating ethash verification cache", "percentage", progress.Load()*100/uint32(rows)/(cacheRounds+1), "elapsed", common.PrettyDuration(time.Since(start)))
 			}
 		}
 	}()
@@ -185,7 +185,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 	keccak512(cache, seed)
 	for offset := uint64(hashBytes); offset < size; offset += hashBytes {
 		keccak512(cache[offset:], cache[offset-hashBytes:offset])
-		atomic.AddUint32(&progress, 1)
+		progress.Add(1)
 	}
 	// Use a low-round version of randmemohash
 	temp := make([]byte, hashBytes)
@@ -200,7 +200,7 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) {
 			bitutil.XORBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
 			keccak512(cache[dstOff:], temp)
 
-			atomic.AddUint32(&progress, 1)
+			progress.Add(1)
 		}
 	}
 	// Swap the byte order on big endian systems and return
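For readers less familiar with Go's typed atomics (added to sync/atomic in Go 1.19), the following standalone sketch shows the shape generateCache ends up with after these hunks: a single worker bumps an atomic.Uint32 counter while a monitoring goroutine reads it with Load. The counts, interval, and names below are illustrative stand-ins, not the real ethash values.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	const total = 1000 // stand-in for the number of cache rows

	var progress atomic.Uint32

	// Monitoring goroutine: periodically reads the counter without locks.
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done:
				return
			case <-time.After(100 * time.Millisecond):
				fmt.Printf("progress: %d%%\n", progress.Load()*100/total)
			}
		}
	}()

	// Worker: bumps the counter after each unit of (placeholder) work.
	for i := 0; i < total; i++ {
		time.Sleep(time.Millisecond) // stand-in for the real hashing work
		progress.Add(1)
	}
	close(done)
}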
@@ -299,7 +299,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 	var pend sync.WaitGroup
 	pend.Add(threads)
 
-	var progress uint64
+	var progress atomic.Uint64
 	for i := 0; i < threads; i++ {
 		go func(id int) {
 			defer pend.Done()
@@ -323,7 +323,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 				}
 				copy(dataset[index*hashBytes:], item)
 
-				if status := atomic.AddUint64(&progress, 1); status%percent == 0 {
+				if status := progress.Add(1); status%percent == 0 {
 					logger.Info("Generating DAG in progress", "percentage", (status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 				}
 			}
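The generateDataset hunks apply the same idea to an atomic.Uint64 shared by several worker goroutines; the value returned by Add doubles as a cheap way to rate-limit log output. A minimal sketch of that pattern, again with made-up sizes rather than the real DAG parameters:

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

func main() {
	const items = 10000            // stand-in for the number of dataset items
	percent := uint64(items / 100) // log roughly once per percent

	threads := runtime.NumCPU()

	var progress atomic.Uint64

	var pend sync.WaitGroup
	pend.Add(threads)
	for i := 0; i < threads; i++ {
		go func(id int) {
			defer pend.Done()
			// Each worker handles the items congruent to its id modulo threads.
			for n := id; n < items; n += threads {
				// ... real code would compute and store a dataset item here ...
				if status := progress.Add(1); status%percent == 0 {
					fmt.Printf("worker %d: %d%% done\n", id, status*100/items)
				}
			}
		}(i)
	}
	pend.Wait()
}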
@@ -308,12 +308,12 @@ func (c *cache) finalizer() {
 
 // dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
 type dataset struct {
 	epoch   uint64    // Epoch for which this cache is relevant
 	dump    *os.File  // File descriptor of the memory mapped cache
 	mmap    mmap.MMap // Memory map itself to unmap before releasing
 	dataset []uint32  // The actual cache data content
 	once    sync.Once // Ensures the cache is generated only once
-	done    uint32    // Atomic flag to determine generation status
+	done    atomic.Bool // Atomic flag to determine generation status
 }
 
 // newDataset creates a new ethash mining dataset and returns it as a plain Go
@@ -326,7 +326,7 @@ func newDataset(epoch uint64) *dataset {
 func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 	d.once.Do(func() {
 		// Mark the dataset generated after we're done. This is needed for remote
-		defer atomic.StoreUint32(&d.done, 1)
+		defer d.done.Store(true)
 
 		csize := cacheSize(d.epoch*epochLength + 1)
 		dsize := datasetSize(d.epoch*epochLength + 1)
@@ -390,7 +390,7 @@ func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
 // or not (it may not have been started at all). This is useful for remote miners
 // to default to verification caches instead of blocking on DAG generations.
 func (d *dataset) generated() bool {
-	return atomic.LoadUint32(&d.done) == 1
+	return d.done.Load()
 }
 
 // finalizer closes any file handlers and memory maps open.
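The last three hunks replace the manual uint32 flag on the dataset struct with an atomic.Bool guarded by sync.Once: generation stores true in a deferred call so the flag is set even on early return, and generated reports completion without blocking. A self-contained sketch of that shape follows; the type and the sleep are stand-ins, not the real dataset struct or DAG build.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type dataset struct {
	once sync.Once   // ensures generation runs only once
	done atomic.Bool // set once generation has finished
}

func (d *dataset) generate() {
	d.once.Do(func() {
		// Mark the dataset generated after we're done, even on early return.
		defer d.done.Store(true)

		time.Sleep(50 * time.Millisecond) // stand-in for the expensive DAG build
	})
}

// generated reports whether generation has completed, without blocking on it.
func (d *dataset) generated() bool {
	return d.done.Load()
}

func main() {
	d := new(dataset)
	fmt.Println("generated before:", d.generated()) // false
	d.generate()
	fmt.Println("generated after:", d.generated()) // true
}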