Merge pull request #3795 from fjl/pow-fix-test-mode
pow: fix Search with ethash test mode
commit 61d2150a07
@@ -428,7 +428,7 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
     current, future := ethash.caches[epoch], (*cache)(nil)
     if current == nil {
         // No in-memory cache, evict the oldest if the cache limit was reached
-        for len(ethash.caches) >= ethash.cachesinmem {
+        for len(ethash.caches) > 0 && len(ethash.caches) >= ethash.cachesinmem {
             var evict *cache
             for _, cache := range ethash.caches {
                 if evict == nil || evict.used.After(cache.used) {
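Note: the only functional change in this hunk (and in the matching dataset hunk below) is the added len(ethash.caches) > 0 guard. The commit title only says it fixes ethash test mode, but a plausible reading is that a test configuration can set the in-memory limit to zero, in which case the old condition len(...) >= limit never turns false even once the map is empty and there is nothing left to evict. A minimal sketch of the guarded pattern, using placeholder types rather than the real cache structs:

package main

import "fmt"

// evictWhileFull mimics the guarded eviction loop: with limit == 0 the unguarded
// condition len(m) >= limit is always true, so only the len(m) > 0 check lets the
// loop stop once there is nothing left to evict.
func evictWhileFull(m map[uint64]string, limit int) {
    for len(m) > 0 && len(m) >= limit {
        var evict uint64
        for k := range m { // the real code picks the least recently used entry
            evict = k
            break
        }
        delete(m, evict)
    }
}

func main() {
    m := map[uint64]string{1: "epoch-1", 2: "epoch-2"}
    evictWhileFull(m, 0) // a zero limit empties the map instead of spinning forever
    fmt.Println(len(m))  // 0
}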
@@ -480,22 +480,16 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
 // Search implements PoW, attempting to find a nonce that satisfies the block's
 // difficulty requirements.
 func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte) {
-    // Extract some data from the block
     var (
         hash = block.HashNoNonce().Bytes()
         diff = block.Difficulty()
         target = new(big.Int).Div(maxUint256, diff)
-    )
-    // Retrieve the mining dataset
-    dataset, size := ethash.dataset(block.NumberU64()), datasetSize(block.NumberU64())
-
-    // Start generating random nonces until we abort or find a good one
-    var (
-        attempts int64
-
+        dataset = ethash.dataset(block.NumberU64())
         rand = rand.New(rand.NewSource(time.Now().UnixNano()))
         nonce = uint64(rand.Int63())
+        attempts int64
     )
+    // Start generating random nonces until we abort or find a good one
     for {
         select {
         case <-stop:
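Note: the restructuring above folds the dataset retrieval into the single var block and drops the separate datasetSize lookup; the size now travels with the dataset itself (see the hashimotoFull hunk further down). For orientation, a small self-contained illustration of the target arithmetic Search relies on, using difficulty 100 as in the new TestTestMode; all names here are local to the sketch:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // A nonce is accepted when the hashimoto result, read as a big-endian
    // integer, is at most 2^256 / difficulty, i.e. roughly 1 in 100 random
    // hashes qualify at difficulty 100.
    maxUint256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
    diff := big.NewInt(100)
    target := new(big.Int).Div(maxUint256, diff)

    result := new(big.Int).Rsh(maxUint256, 8) // 2^248, comfortably below 2^256/100
    fmt.Println(result.Cmp(target) <= 0)      // true
}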
@@ -511,7 +505,7 @@ func (ethash *Ethash) Search(block Block, stop <-chan struct{}) (uint64, []byte)
                 attempts = 0
             }
             // Compute the PoW value of this nonce
-            digest, result := hashimotoFull(size, dataset, hash, nonce)
+            digest, result := hashimotoFull(dataset, hash, nonce)
             if new(big.Int).SetBytes(result).Cmp(target) <= 0 {
                 return nonce, digest
             }
@@ -532,7 +526,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
     current, future := ethash.datasets[epoch], (*dataset)(nil)
     if current == nil {
         // No in-memory dataset, evict the oldest if the dataset limit was reached
-        for len(ethash.datasets) >= ethash.dagsinmem {
+        for len(ethash.datasets) > 0 && len(ethash.datasets) >= ethash.dagsinmem {
             var evict *dataset
             for _, dataset := range ethash.datasets {
                 if evict == nil || evict.used.After(dataset.used) {
@@ -349,12 +349,12 @@ func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]b
 // hashimotoFull aggregates data from the full dataset (using the full in-memory
 // dataset) in order to produce our final value for a particular header hash and
 // nonce.
-func hashimotoFull(size uint64, dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
+func hashimotoFull(dataset []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
     lookup := func(index uint32) []uint32 {
         offset := index * hashWords
         return dataset[offset : offset+hashWords]
     }
-    return hashimoto(hash, nonce, size, lookup)
+    return hashimoto(hash, nonce, uint64(len(dataset))*4, lookup)
 }
 
 // datasetSizes is a lookup table for the ethash dataset size for the first 2048
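Note: hashimotoFull now derives the dataset size from the slice it receives instead of trusting a caller-supplied value. Each dataset entry is a 32-bit word, so uint64(len(dataset))*4 is the byte size hashimoto expects; with a reduced test-mode dataset this keeps the lookup bound consistent with what is actually in memory, which is presumably the core of the test-mode fix. A quick check of the arithmetic against the size the old TestHashimoto call passed explicitly:

package main

import "fmt"

func main() {
    // The old TestHashimoto call passed the size 32*1024 explicitly; a dataset of
    // that byte size holds 32*1024/4 = 8192 uint32 words (assumed here), and the
    // new expression uint64(len(dataset))*4 recovers the same byte count from the
    // slice itself.
    dataset := make([]uint32, 32*1024/4)
    size := uint64(len(dataset)) * 4
    fmt.Println(size)            // 32768
    fmt.Println(size == 32*1024) // true
}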
@@ -660,7 +660,7 @@ func TestHashimoto(t *testing.T) {
     if !bytes.Equal(result, wantResult) {
         t.Errorf("light hashimoto result mismatch: have %x, want %x", result, wantResult)
     }
-    digest, result = hashimotoFull(32*1024, dataset, hash, nonce)
+    digest, result = hashimotoFull(dataset, hash, nonce)
     if !bytes.Equal(digest, wantDigest) {
         t.Errorf("full hashimoto digest mismatch: have %x, want %x", digest, wantDigest)
     }
@@ -713,6 +713,17 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
     pend.Wait()
 }
 
+func TestTestMode(t *testing.T) {
+    head := &types.Header{Difficulty: big.NewInt(100)}
+    ethash := NewTestEthash()
+    nonce, mix := ethash.Search(types.NewBlockWithHeader(head), nil)
+    head.Nonce = types.EncodeNonce(nonce)
+    copy(head.MixDigest[:], mix)
+    if err := ethash.Verify(types.NewBlockWithHeader(head)); err != nil {
+        t.Error("unexpected Verify error:", err)
+    }
+}
+
 // Benchmarks the cache generation performance.
 func BenchmarkCacheGeneration(b *testing.B) {
     for i := 0; i < b.N; i++ {
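Note: the new test feeds the Search result straight back into the header before calling Verify: the uint64 nonce is packed into the 8-byte header nonce field and the 32-byte mix digest is copied into MixDigest. As a point of reference, a minimal stand-in for what types.EncodeNonce is assumed to do (big-endian packing of the nonce); this is an illustration, not the real types package:

package main

import (
    "encoding/binary"
    "fmt"
)

// BlockNonce stands in for the 8-byte header nonce type.
type BlockNonce [8]byte

// EncodeNonce packs the uint64 nonce big-endian into the 8-byte field
// (assumed to match the behaviour of types.EncodeNonce).
func EncodeNonce(i uint64) BlockNonce {
    var n BlockNonce
    binary.BigEndian.PutUint64(n[:], i)
    return n
}

func main() {
    fmt.Printf("%x\n", EncodeNonce(0x0102030405060708)) // 0102030405060708
}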
@@ -758,6 +769,6 @@ func BenchmarkHashimotoFullSmall(b *testing.B) {
 
     b.ResetTimer()
     for i := 0; i < b.N; i++ {
-        hashimotoFull(32*65536, dataset, hash, 0)
+        hashimotoFull(dataset, hash, 0)
     }
 }