pow: make data dumps backwards compatible, fix DAG end

commit f3579f6460
parent 5c8fa6ae1a
@@ -32,12 +32,12 @@ import (
 	"unsafe"
 
 	mmap "github.com/edsrzf/mmap-go"
-	"github.com/ethereum/go-ethereum/common/hexutil"
 	"github.com/ethereum/go-ethereum/log"
 	metrics "github.com/rcrowley/go-metrics"
 )
 
 var (
+	ErrInvalidDumpMagic  = errors.New("invalid dump magic")
 	ErrNonceOutOfRange   = errors.New("nonce out of range")
 	ErrInvalidDifficulty = errors.New("non-positive difficulty")
 	ErrInvalidMixDigest  = errors.New("invalid mix digest")
@@ -55,7 +55,7 @@ var (
 	algorithmRevision = 23
 
 	// dumpMagic is a dataset dump header to sanity check a data dump.
-	dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
+	dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
 )
 
 // isLittleEndian returns whether the local system is running in little or big
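The magic changes from a fixed byte string to two uint32 words that are compared through the same native-endian view as the memory-mapped dump, so the header bytes on disk differ between little- and big-endian machines but always match dumpMagic on the platform that wrote them. A standalone sketch of the resulting byte layout, assuming the same word-at-a-time write the commit performs (the isLittleEndian helper is re-derived here only to keep the example self-contained):

// Sketch: how the two magic words lay out as bytes on the local machine.
// Illustration only, not part of the commit; dumpMagic is copied from the
// diff above.
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

var dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}

func main() {
	buf := make([]byte, 8)
	if isLittleEndian() {
		binary.LittleEndian.PutUint32(buf[0:], dumpMagic[0])
		binary.LittleEndian.PutUint32(buf[4:], dumpMagic[1])
	} else {
		binary.BigEndian.PutUint32(buf[0:], dumpMagic[0])
		binary.BigEndian.PutUint32(buf[4:], dumpMagic[1])
	}
	fmt.Printf("on-disk header bytes: %x\n", buf) // fecaddbaaddee1fe on little-endian
}

// isLittleEndian checks the byte order of the local machine, mirroring the
// helper referenced in the diff.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}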
@@ -76,7 +76,14 @@ func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
 		file.Close()
 		return nil, nil, nil, err
 	}
-	return file, mem, buffer, err
+	for i, magic := range dumpMagic {
+		if buffer[i] != magic {
+			mem.Unmap()
+			file.Close()
+			return nil, nil, nil, ErrInvalidDumpMagic
+		}
+	}
+	return file, mem, buffer[len(dumpMagic):], err
 }
 
 // memoryMapFile tries to memory map an already opened file descriptor.
@@ -113,7 +120,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	if err = dump.Truncate(int64(size)); err != nil {
+	if err = dump.Truncate(int64(len(dumpMagic))*4 + int64(size)); err != nil {
 		return nil, nil, nil, err
 	}
 	// Memory map the file for writing and fill it with the generator
@@ -122,7 +129,10 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 		dump.Close()
 		return nil, nil, nil, err
 	}
-	generator(buffer)
+	copy(buffer, dumpMagic)
 
+	data := buffer[len(dumpMagic):]
+	generator(data)
+
 	if err := mem.Flush(); err != nil {
 		mem.Unmap()
@@ -130,7 +140,7 @@ func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint
 		return nil, nil, nil, err
 	}
 	os.Rename(temp, path)
-	return dump, mem, buffer, nil
+	return dump, mem, data, nil
 }
 
 // cache wraps an ethash cache with some metadata to allow easier concurrent use.
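Taken together, memoryMapAndGenerate now writes len(dumpMagic) native-endian words ahead of the payload (hence the enlarged Truncate: 4 bytes per word plus size), and memoryMap rejects any file that does not start with them, which is how pre-magic dumps get detected and regenerated rather than misread. A minimal round-trip sketch of that layout using plain slices instead of the real mmap-backed buffers (writeDump and readDump are hypothetical helpers, not names from the commit):

// Sketch of the dump layout the two functions above agree on:
// len(dumpMagic) native-endian uint32 words, then the payload.
package main

import (
	"errors"
	"fmt"
)

var (
	dumpMagic           = []uint32{0xbaddcafe, 0xfee1dead}
	ErrInvalidDumpMagic = errors.New("invalid dump magic")
)

// writeDump prepends the magic words to a generated payload.
func writeDump(payload []uint32) []uint32 {
	buffer := make([]uint32, len(dumpMagic)+len(payload))
	copy(buffer, dumpMagic)
	copy(buffer[len(dumpMagic):], payload)
	return buffer
}

// readDump validates the magic and returns the payload view, mirroring the
// check added to memoryMap.
func readDump(buffer []uint32) ([]uint32, error) {
	for i, magic := range dumpMagic {
		if buffer[i] != magic {
			return nil, ErrInvalidDumpMagic
		}
	}
	return buffer[len(dumpMagic):], nil
}

func main() {
	dump := writeDump([]uint32{1, 2, 3})
	data, err := readDump(dump)
	fmt.Println(data, err) // [1 2 3] <nil>

	_, err = readDump([]uint32{0, 0, 1, 2, 3}) // old-format dump: no magic
	fmt.Println(err)                           // invalid dump magic
}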
@@ -165,11 +175,11 @@ func (c *cache) generate(dir string, limit int, test bool) {
 		return
 	}
 	// Disk storage is needed, this will get fancy
-	endian := "le"
+	var endian string
 	if !isLittleEndian() {
-		endian = "be"
+		endian = ".be"
 	}
-	path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+	path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 	logger := log.New("epoch", c.epoch)
 
 	// Try to load the file from disk and memory map it
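The file name now encodes only the first 8 seed bytes and drops the explicit extension on little-endian systems, so the common case becomes cache-R23-<16 hex digits> while big-endian dumps get a .be suffix; old-style names are never matched, so legacy files are simply regenerated. A sketch of the scheme (cachePath is a hypothetical helper and the seed bytes are invented for illustration):

// Sketch: new-style cache file naming under the revised scheme.
package main

import (
	"fmt"
	"path/filepath"
)

const algorithmRevision = 23

// cachePath builds a dump file name from the revision, the first 8 seed
// bytes and an endianness suffix that is empty on little-endian machines.
func cachePath(dir string, seed []byte, littleEndian bool) string {
	endian := ""
	if !littleEndian {
		endian = ".be"
	}
	return filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
}

func main() {
	seed := []byte{0x7c, 0xe2, 0x88, 0x2b, 0x0e, 0x4b, 0x9b, 0x8f, 0xaa} // hypothetical seed hash prefix
	fmt.Println(cachePath("/tmp/ethash", seed, true))  // /tmp/ethash/cache-R23-7ce2882b0e4b9b8f
	fmt.Println(cachePath("/tmp/ethash", seed, false)) // /tmp/ethash/cache-R23-7ce2882b0e4b9b8f.be
}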
@@ -192,7 +202,7 @@
 		// Iterate over all previous instances and delete old ones
 		for ep := int(c.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})
@@ -249,11 +259,11 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 		generateDataset(d.dataset, d.epoch, cache)
 	}
 	// Disk storage is needed, this will get fancy
-	endian := "le"
+	var endian string
 	if !isLittleEndian() {
-		endian = "be"
+		endian = ".be"
 	}
-	path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+	path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 	logger := log.New("epoch", d.epoch)
 
 	// Try to load the file from disk and memory map it
@@ -279,7 +289,7 @@ func (d *dataset) generate(dir string, limit int, test bool) {
 		// Iterate over all previous instances and delete old ones
 		for ep := int(d.epoch) - limit; ep >= 0; ep-- {
 			seed := seedHash(uint64(ep)*epochLength + 1)
-			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
+			path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
 			os.Remove(path)
 		}
 	})

@@ -225,7 +225,8 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 	// Print some debug logs to allow analysis on low end devices
 	logger := log.New("epoch", epoch)
 
-	defer func(start time.Time) {
+	start := time.Now()
+	defer func() {
 		elapsed := time.Since(start)
 
 		logFn := logger.Debug
@@ -233,7 +234,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 			logFn = logger.Info
 		}
 		logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
-	}(time.Now())
+	}()
 
 	// Figure out whether the bytes need to be swapped for the machine
 	swapped := !isLittleEndian()
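Hoisting start out of the deferred closure's argument list puts it in the enclosing scope, so the progress logging added in the last hunk below can reuse the same timestamp. A tiny sketch of the pattern, with made-up work in place of DAG generation:

// Sketch: a start time shared by both the deferred summary log and
// in-progress logging inside the function body.
package main

import (
	"fmt"
	"time"
)

func work() {
	start := time.Now()
	defer func() {
		fmt.Println("total elapsed:", time.Since(start))
	}()

	for i := 0; i < 3; i++ {
		time.Sleep(10 * time.Millisecond)
		fmt.Println("progress", i, "elapsed", time.Since(start)) // reuses the same start
	}
}

func main() { work() }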
@@ -260,15 +261,15 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 			keccak512 := makeHasher(sha3.NewKeccak512())
 
 			// Calculate the data segment this thread should generate
-			batch := uint32(size / hashBytes / uint64(threads))
-			start := uint32(id) * batch
-			limit := start + batch
+			batch := uint32((size + hashBytes*uint64(threads) - 1) / (hashBytes * uint64(threads)))
+			first := uint32(id) * batch
+			limit := first + batch
 			if limit > uint32(size/hashBytes) {
 				limit = uint32(size / hashBytes)
 			}
 			// Calculate the dataset segment
 			percent := uint32(size / hashBytes / 100)
-			for index := start; index < limit; index++ {
+			for index := first; index < limit; index++ {
 				item := generateDatasetItem(cache, index, keccak512)
 				if swapped {
 					swap(item)
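This hunk is the "fix DAG end" part of the commit: the old floor division handed each thread too few rows whenever size/hashBytes was not a multiple of the thread count, leaving the tail of the dataset ungenerated; ceiling division plus the existing clamp covers every row exactly once. A worked example with made-up numbers (hashBytes is 64 in ethash, but the row and thread counts here are invented for illustration):

// Worked example of floor vs ceiling batch sizing across threads.
package main

import "fmt"

func main() {
	const (
		hashBytes = 64
		threads   = 8
		size      = 100 * hashBytes // 100 dataset rows, not divisible by 8
	)
	rows := uint32(size / hashBytes)

	oldBatch := uint32(size / hashBytes / threads)                            // floor: 12
	newBatch := uint32((size + hashBytes*threads - 1) / (hashBytes * threads)) // ceil: 13

	fmt.Println("rows covered before:", threads*oldBatch, "of", rows) // 96 of 100: tail lost

	covered := uint32(0)
	for id := uint32(0); id < threads; id++ {
		first := id * newBatch
		limit := first + newBatch
		if limit > rows {
			limit = rows // same clamp as the diff
		}
		if first < limit {
			covered += limit - first
		}
	}
	fmt.Println("rows covered after:", covered, "of", rows) // 100 of 100
}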
@@ -276,7 +277,7 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
 				copy(dataset[index*hashBytes:], item)
 
 				if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
-					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes))
+					logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes), "elapsed", common.PrettyDuration(time.Since(start)))
 				}
 			}
 		}(i)