crypto, pow, vendor: hash optimizations, mmap ethash

commit 5c8fa6ae1a (parent b7d93500f1)
Péter Szilágyi, 2017-03-07 20:05:54 +02:00; committed by Felix Lange
12 changed files with 772 additions and 203 deletions


@@ -37,10 +37,6 @@ var (
secp256k1_halfN = new(big.Int).Div(secp256k1_N, big.NewInt(2))
)
// Hasher is a repetitive hasher allowing the same hash data structures to be
// reused between hash runs instead of requiring new ones to be created.
type Hasher func(data []byte) []byte
// Keccak256 calculates and returns the Keccak256 hash of the input data.
func Keccak256(data ...[]byte) []byte {
d := sha3.NewKeccak256()
@@ -61,22 +57,6 @@ func Keccak256Hash(data ...[]byte) (h common.Hash) {
return h
}
// Keccak256Hasher creates a repetitive Keccak256 hasher, allowing the same hash
// data structures to be reused between hash runs instead of requiring new ones
// to be created.
//
// The returned function is not thread safe!
func Keccak256Hasher() Hasher {
hasher := sha3.NewKeccak256()
return func(data []byte) []byte {
hasher.Write(data)
result := hasher.Sum(nil)
hasher.Reset()
return result
}
}
// Keccak512 calculates and returns the Keccak512 hash of the input data.
func Keccak512(data ...[]byte) []byte {
d := sha3.NewKeccak512()
@@ -86,22 +66,6 @@ func Keccak512(data ...[]byte) []byte {
return d.Sum(nil)
}
// Keccak512Hasher creates a repetitive Keccak512 hasher, allowing the same hash
// data structures to be reused between hash runs instead of requiring new ones
// to be created.
//
// The returned function is not thread safe!
func Keccak512Hasher() Hasher {
hasher := sha3.NewKeccak512()
return func(data []byte) []byte {
hasher.Write(data)
result := hasher.Sum(nil)
hasher.Reset()
return result
}
}
// Deprecated: For backward compatibility as other packages depend on these
func Sha3Hash(data ...[]byte) common.Hash { return Keccak256Hash(data...) }


@@ -17,20 +17,21 @@
package pow
import (
"bufio"
"bytes"
"errors"
"fmt"
"io/ioutil"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"reflect"
"strconv"
"sync"
"time"
"unsafe"
"github.com/ethereum/go-ethereum/common"
mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/log"
metrics "github.com/rcrowley/go-metrics"
@@ -57,10 +58,89 @@ var (
dumpMagic = hexutil.MustDecode("0xfee1deadbaddcafe")
)
// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
n := uint32(0x01020304)
return *(*byte)(unsafe.Pointer(&n)) == 0x04
}
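The probe relies on the fact that a little-endian machine stores a word's least significant byte at the lowest address. A self-contained sketch of the same check paired with an encoding/binary round trip (illustrative, not part of the commit):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// isLittleEndian mirrors the commit's probe: store a known 32-bit
// pattern and inspect the byte that lands at the lowest address.
func isLittleEndian() bool {
	n := uint32(0x01020304)
	return *(*byte)(unsafe.Pointer(&n)) == 0x04
}

func main() {
	var buf [4]byte
	binary.LittleEndian.PutUint32(buf[:], 0x01020304)
	// On x86/amd64 this prints "04 03 02 01 true".
	fmt.Printf("% x %v\n", buf, isLittleEndian())
}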
// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string) (*os.File, mmap.MMap, []uint32, error) {
file, err := os.OpenFile(path, os.O_RDONLY, 0644)
if err != nil {
return nil, nil, nil, err
}
mem, buffer, err := memoryMapFile(file, false)
if err != nil {
file.Close()
return nil, nil, nil, err
}
return file, mem, buffer, err
}
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
// Try to memory map the file
flag := mmap.RDONLY
if write {
flag = mmap.RDWR
}
mem, err := mmap.Map(file, flag, 0)
if err != nil {
return nil, nil, err
}
// Yay, we managed to memory map the file, here be dragons
header := *(*reflect.SliceHeader)(unsafe.Pointer(&mem))
header.Len /= 4
header.Cap /= 4
return mem, *(*[]uint32)(unsafe.Pointer(&header)), nil
}
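The "here be dragons" cast reinterprets the mapped byte buffer as a []uint32 without copying: the slice header is duplicated, its length and capacity are divided by the 4-byte element size, and the header is read back as a word slice aliasing the same memory. A standalone sketch of the trick on an ordinary byte slice (illustrative; the commit applies it to the mmap buffer):

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"unsafe"
)

func main() {
	// Eight bytes backing two little-endian uint32 words.
	raw := []byte{0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00}

	// Copy the slice header and shrink Len/Cap by the element size.
	header := *(*reflect.SliceHeader)(unsafe.Pointer(&raw))
	header.Len /= 4
	header.Cap /= 4

	// Reinterpret the header as a []uint32 sharing raw's memory.
	words := *(*[]uint32)(unsafe.Pointer(&header))
	fmt.Println(words) // [1 2] on a little-endian machine

	// Keep the original slice alive while the alias is still in use.
	runtime.KeepAlive(raw)
}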
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
// Ensure the data folder exists
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, nil, nil, err
}
// Create a huge temporary empty file to fill with data
temp := path + "." + strconv.Itoa(rand.Int())
dump, err := os.Create(temp)
if err != nil {
return nil, nil, nil, err
}
if err = dump.Truncate(int64(size)); err != nil {
return nil, nil, nil, err
}
// Memory map the file for writing and fill it with the generator
mem, buffer, err := memoryMapFile(dump, true)
if err != nil {
dump.Close()
return nil, nil, nil, err
}
generator(buffer)
if err := mem.Flush(); err != nil {
mem.Unmap()
dump.Close()
return nil, nil, nil, err
}
os.Rename(temp, path)
return dump, mem, buffer, nil
}
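The temp-file-then-rename sequence is what makes generation crash safe: an interrupted run leaves at worst a stray temporary, never a truncated dump at the final path. A hedged usage sketch inside the same package (demoFill and the file name are stand-ins, not part of the commit):

// demoFill assumes the helpers above are in scope (package pow).
func demoFill(dir string) error {
	path := filepath.Join(dir, "demo-dump")
	dump, mem, buffer, err := memoryMapAndGenerate(path, 1<<20, func(buf []uint32) {
		for i := range buf {
			buf[i] = uint32(i) // stand-in for generateCache/generateDataset
		}
	})
	if err != nil {
		return err
	}
	defer dump.Close()
	defer mem.Unmap() // deferred last, so it runs before the file is closed

	_ = buffer[0] // the data is immediately readable through the mapping
	return nil
}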
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
epoch uint64 // Epoch for which this cache is relevant
cache []uint32 // The actual cache data content
epoch uint64 // Epoch for which this cache is relevant
dump *os.File // File descriptor of the memory mapped cache
mmap mmap.MMap // Memory map itself to unmap before releasing
cache []uint32 // The actual cache data content (may be memory mapped)
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the cache is generated only once
lock sync.Mutex // Ensures thread safety for updating the usage time
@@ -71,57 +151,72 @@ func (c *cache) generate(dir string, limit int, test bool) {
c.once.Do(func() {
// If we have a testing cache, generate and return
if test {
rawCache := generateCache(1024, seedHash(c.epoch*epochLength+1))
c.cache = prepare(1024, bytes.NewReader(rawCache))
c.cache = make([]uint32, 1024/4)
generateCache(c.cache, c.epoch, seedHash(c.epoch*epochLength+1))
return
}
// Full cache generation is needed, check cache dir for existing data
// If we don't store anything on disk, generate and return
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
logger := log.New("seed", hexutil.Bytes(seed))
if dir != "" {
dump, err := os.Open(path)
if err == nil {
logger.Info("Loading ethash cache from disk")
start := time.Now()
c.cache = prepare(size, bufio.NewReader(dump))
logger.Info("Loaded ethash cache from disk", "elapsed", common.PrettyDuration(time.Since(start)))
dump.Close()
return
}
if dir == "" {
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
return
}
// No previous disk cache was available, generate on the fly
rawCache := generateCache(size, seed)
c.cache = prepare(size, bytes.NewReader(rawCache))
// Disk storage is needed, this will get fancy
endian := "le"
if !isLittleEndian() {
endian = "be"
}
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
logger := log.New("epoch", c.epoch)
// If a cache directory is given, attempt to serialize for next time
if dir != "" {
// Store the ethash cache to disk
start := time.Now()
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
logger.Error("Failed to create ethash cache dir", "err", err)
} else if err := ioutil.WriteFile(path, rawCache, os.ModePerm); err != nil {
logger.Error("Failed to write ethash cache to disk", "err", err)
} else {
logger.Info("Stored ethash cache to disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x", algorithmRevision, seed))
os.Remove(path)
}
// Try to load the file from disk and memory map it
var err error
c.dump, c.mmap, c.cache, err = memoryMap(path)
if err == nil {
logger.Debug("Loaded old ethash cache from disk")
return
}
logger.Debug("Failed to load old ethash cache", "err", err)
// No previous cache available, create a new cache file to fill
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
if err != nil {
logger.Error("Failed to generate mapped ethash cache", "err", err)
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x.%s", algorithmRevision, seed, endian))
os.Remove(path)
}
})
}
// release closes any file handlers and memory maps open.
func (c *cache) release() {
if c.mmap != nil {
c.mmap.Unmap()
c.mmap = nil
}
if c.dump != nil {
c.dump.Close()
c.dump = nil
}
}
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
epoch uint64 // Epoch for which this cache is relevant
epoch uint64 // Epoch for which this dataset is relevant
dump *os.File // File descriptor of the memory mapped dataset
mmap mmap.MMap // Memory map itself to unmap before releasing
dataset []uint32 // The actual dataset content (may be memory mapped)
used time.Time // Timestamp of the last use for smarter eviction
once sync.Once // Ensures the dataset is generated only once
@@ -129,78 +224,91 @@ type dataset struct {
}
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, test bool, discard bool) {
func (d *dataset) generate(dir string, limit int, test bool) {
d.once.Do(func() {
// If we have a testing dataset, generate and return
if test {
rawCache := generateCache(1024, seedHash(d.epoch*epochLength+1))
intCache := prepare(1024, bytes.NewReader(rawCache))
cache := make([]uint32, 1024/4)
generateCache(cache, d.epoch, seedHash(d.epoch*epochLength+1))
rawDataset := generateDataset(32*1024, intCache)
d.dataset = prepare(32*1024, bytes.NewReader(rawDataset))
d.dataset = make([]uint32, 32*1024/4)
generateDataset(d.dataset, d.epoch, cache)
return
}
// Full dataset generation is needed, check dataset dir for existing data
// If we don't store anything on disk, generate and return
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
logger := log.New("seed", hexutil.Bytes(seed))
if dir == "" {
cache := make([]uint32, csize/4)
generateCache(cache, d.epoch, seed)
if dir != "" {
dump, err := os.Open(path)
if err == nil {
if !discard {
logger.Info("Loading ethash DAG from disk")
start := time.Now()
d.dataset = prepare(dsize, bufio.NewReader(dump))
logger.Info("Loaded ethash DAG from disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
dump.Close()
return
}
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
}
// No previous disk dataset was available, generate on the fly
rawCache := generateCache(csize, seed)
intCache := prepare(csize, bytes.NewReader(rawCache))
// Disk storage is needed, this will get fancy
endian := "le"
if !isLittleEndian() {
endian = "be"
}
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
logger := log.New("epoch", d.epoch)
rawDataset := generateDataset(dsize, intCache)
if !discard {
d.dataset = prepare(dsize, bytes.NewReader(rawDataset))
// Try to load the file from disk and memory map it
var err error
d.dump, d.mmap, d.dataset, err = memoryMap(path)
if err == nil {
logger.Debug("Loaded old ethash dataset from disk")
return
}
// If a dataset directory is given, attempt to serialize for next time
if dir != "" {
// Store the ethash dataset to disk
start := time.Now()
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
logger.Error("Failed to create ethash DAG dir", "err", err)
} else if err := ioutil.WriteFile(path, rawDataset, os.ModePerm); err != nil {
logger.Error("Failed to write ethash DAG to disk", "err", err)
} else {
logger.Info("Stored ethash DAG to disk", "elapsed", common.PrettyDuration(time.Since(start)))
}
// Iterate over all previous instances and delete old ones
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x", algorithmRevision, seed))
os.Remove(path)
}
logger.Debug("Failed to load old ethash dataset", "err", err)
// No previous dataset available, create a new dataset file to fill
cache := make([]uint32, csize/4)
generateCache(cache, d.epoch, seed)
d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err)
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
}
// Iterate over all previous instances and delete old ones
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x.%s", algorithmRevision, seed, endian))
os.Remove(path)
}
})
}
// release closes any file handlers and memory maps open.
func (d *dataset) release() {
if d.mmap != nil {
d.mmap.Unmap()
d.mmap = nil
}
if d.dump != nil {
d.dump.Close()
d.dump = nil
}
}
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
c := cache{epoch: block/epochLength + 1}
c.generate(dir, math.MaxInt32, false)
c.release()
}
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
d := dataset{epoch: block/epochLength + 1}
d.generate(dir, math.MaxInt32, false, true)
d.generate(dir, math.MaxInt32, false)
d.release()
}
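Both helpers exist so that tooling can pre-generate the on-disk structures ahead of time; a minimal caller (the output directory is illustrative):

package main

import "github.com/ethereum/go-ethereum/pow"

func main() {
	// Pre-generate ethash cache and DAG data keyed to block 1,000,000
	// into an illustrative data directory.
	pow.MakeCache(1000000, "/tmp/ethash")
	pow.MakeDataset(1000000, "/tmp/ethash")
}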
// Ethash is a PoW data structure implementing the ethash algorithm.
@@ -318,22 +426,26 @@ func (ethash *Ethash) cache(block uint64) []uint32 {
}
}
delete(ethash.caches, evict.epoch)
evict.release()
log.Debug("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
log.Trace("Evicted ethash cache", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.fcache != nil && ethash.fcache.epoch == epoch {
log.Debug("Using pre-generated cache", "epoch", epoch)
log.Trace("Using pre-generated cache", "epoch", epoch)
current, ethash.fcache = ethash.fcache, nil
} else {
log.Debug("Requiring new ethash cache", "epoch", epoch)
log.Trace("Requiring new ethash cache", "epoch", epoch)
current = &cache{epoch: epoch}
}
ethash.caches[epoch] = current
// If we just used up the future cache, or need a refresh, regenerate
if ethash.fcache == nil || ethash.fcache.epoch <= epoch {
log.Debug("Requiring new future ethash cache", "epoch", epoch+1)
if ethash.fcache != nil {
ethash.fcache.release()
}
log.Trace("Requiring new future ethash cache", "epoch", epoch+1)
future = &cache{epoch: epoch + 1}
ethash.fcache = future
}
@@ -418,23 +530,27 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
}
}
delete(ethash.datasets, evict.epoch)
evict.release()
log.Debug("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
log.Trace("Evicted ethash dataset", "epoch", evict.epoch, "used", evict.used)
}
// If we have the new cache pre-generated, use that, otherwise create a new one
if ethash.fdataset != nil && ethash.fdataset.epoch == epoch {
log.Debug("Using pre-generated dataset", "epoch", epoch)
log.Trace("Using pre-generated dataset", "epoch", epoch)
current = &dataset{epoch: ethash.fdataset.epoch} // Reload from disk
ethash.fdataset = nil
} else {
log.Debug("Requiring new ethash dataset", "epoch", epoch)
log.Trace("Requiring new ethash dataset", "epoch", epoch)
current = &dataset{epoch: epoch}
}
ethash.datasets[epoch] = current
// If we just used up the future dataset, or need a refresh, regenerate
if ethash.fdataset == nil || ethash.fdataset.epoch <= epoch {
log.Debug("Requiring new future ethash dataset", "epoch", epoch+1)
if ethash.fdataset != nil {
ethash.fdataset.release()
}
log.Trace("Requiring new future ethash dataset", "epoch", epoch+1)
future = &dataset{epoch: epoch + 1}
ethash.fdataset = future
}
@@ -443,7 +559,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
ethash.lock.Unlock()
// Wait for generation finish, bump the timestamp and finalize the cache
current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, false)
current.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
current.lock.Lock()
current.used = time.Now()
@@ -451,7 +567,7 @@ func (ethash *Ethash) dataset(block uint64) []uint32 {
// If we exhausted the future dataset, now's a good time to regenerate it
if future != nil {
go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester, true) // Discard results from memory
go future.generate(ethash.dagdir, ethash.dagsondisk, ethash.tester)
}
return current.dataset
}


@@ -18,15 +18,17 @@ package pow
import (
"encoding/binary"
"io"
"hash"
"reflect"
"runtime"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/log"
)
@@ -44,6 +46,22 @@ const (
loopAccesses = 64 // Number of accesses in hashimoto loop
)
// hasher is a repetitive hasher allowing the same hash data structures to be
// reused between hash runs instead of requiring new ones to be created.
type hasher func(dest []byte, data []byte)
// makeHasher creates a repetitive hasher, allowing the same hash data structures
// to be reused between hash runs instead of requiring new ones to be created.
//
// The returned function is not thread safe!
func makeHasher(h hash.Hash) hasher {
return func(dest []byte, data []byte) {
h.Write(data)
h.Sum(dest[:0])
h.Reset()
}
}
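makeHasher replaces the exported crypto.Keccak256Hasher/Keccak512Hasher closures removed above: one hash state is reused across calls and the digest is written through Sum(dest[:0]) into a caller-supplied buffer, so the hot loops allocate nothing per invocation. A sketch of the pattern with the standard library's SHA-256 standing in for the vendored Keccak:

package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
)

type hasher func(dest []byte, data []byte)

// makeHasher wraps a hash.Hash in a reusable closure. As in the commit,
// the returned function is not safe for concurrent use.
func makeHasher(h hash.Hash) hasher {
	return func(dest []byte, data []byte) {
		h.Write(data)
		h.Sum(dest[:0]) // appends into dest's backing array: no new slice
		h.Reset()
	}
}

func main() {
	sum := makeHasher(sha256.New())
	out := make([]byte, sha256.Size)
	sum(out, []byte("hello"))
	fmt.Printf("%x\n", out)
}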
// seedHash is the seed to use for generating a verification cache and the mining
// dataset.
func seedHash(block uint64) []byte {
@@ -51,9 +69,9 @@ func seedHash(block uint64) []byte {
if block < epochLength {
return seed
}
keccak256 := crypto.Keccak256Hasher()
keccak256 := makeHasher(sha3.NewKeccak256())
for i := 0; i < int(block/epochLength); i++ {
seed = keccak256(seed)
keccak256(seed, seed)
}
return seed
}
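The epoch seed is Keccak-256 iterated once per elapsed epoch, starting from 32 zero bytes; the epoch-1 seed is therefore keccak256 of 32 zero bytes, the 0x290dec… constant the tests below previously hard-coded as a seed. A hedged equivalent using golang.org/x/crypto/sha3's legacy Keccak (the commit uses its vendored crypto/sha3 instead):

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

const epochLength = 30000

// seedForBlock mirrors seedHash: one Keccak-256 round per elapsed epoch.
func seedForBlock(block uint64) []byte {
	seed := make([]byte, 32)
	h := sha3.NewLegacyKeccak256()
	for i := uint64(0); i < block/epochLength; i++ {
		h.Reset()
		h.Write(seed)
		seed = h.Sum(nil)
	}
	return seed
}

func main() {
	// Epoch 1: keccak256 of 32 zero bytes -> 0x290decd9...
	fmt.Printf("%x\n", seedForBlock(epochLength))
}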
@@ -63,17 +81,30 @@
// memory, then performing two passes of Sergio Demian Lerner's RandMemoHash
// algorithm from Strict Memory Hard Hashing Functions (2014). The output is a
// set of 524288 64-byte values.
func generateCache(size uint64, seed []byte) []byte {
//
// This method places the result into dest in machine byte order.
func generateCache(dest []uint32, epoch uint64, seed []byte) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("seed", hexutil.Bytes(seed))
logger.Debug("Generating ethash verification cache")
logger := log.New("epoch", epoch)
start := time.Now()
defer func() {
logger.Info("Generated ethash verification cache", "elapsed", common.PrettyDuration(time.Since(start)))
elapsed := time.Since(start)
logFn := logger.Debug
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
}()
// Convert our destination slice to a byte buffer
header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
header.Len *= 4
header.Cap *= 4
cache := *(*[]byte)(unsafe.Pointer(&header))
// Calculate the number of theoretical rows (we'll store in one buffer nonetheless)
size := uint64(len(cache))
rows := int(size) / hashBytes
// Start a monitoring goroutine to report progress on low end devices
@@ -93,13 +124,12 @@
}
}()
// Create a hasher to reuse between invocations
keccak512 := crypto.Keccak512Hasher()
keccak512 := makeHasher(sha3.NewKeccak512())
// Sequentially produce the initial dataset
cache := make([]byte, size)
copy(cache, keccak512(seed))
keccak512(cache, seed)
for offset := uint64(hashBytes); offset < size; offset += hashBytes {
copy(cache[offset:], keccak512(cache[offset-hashBytes:offset]))
keccak512(cache[offset:], cache[offset-hashBytes:offset])
atomic.AddUint32(&progress, 1)
}
// Use a low-round version of randmemohash
@@ -113,26 +143,31 @@
xorOff = (binary.LittleEndian.Uint32(cache[dstOff:]) % uint32(rows)) * hashBytes
)
xorBytes(temp, cache[srcOff:srcOff+hashBytes], cache[xorOff:xorOff+hashBytes])
copy(cache[dstOff:], keccak512(temp))
keccak512(cache[dstOff:], temp)
atomic.AddUint32(&progress, 1)
}
}
return cache
// Swap the byte order on big endian systems and return
if !isLittleEndian() {
swap(cache)
}
}
// swap changes the byte order of the buffer assuming a uint32 representation.
func swap(buffer []byte) {
for i := 0; i < len(buffer); i += 4 {
binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:]))
}
}
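swap converts the canonical little-endian layout into the host's native order one 32-bit word at a time, which is what lets big-endian machines reuse memory-mapped dumps. A tiny round trip (applying it twice restores the input):

package main

import (
	"encoding/binary"
	"fmt"
)

// swap mirrors the commit's helper: rewrite each 4-byte group from
// little-endian to big-endian order in place.
func swap(buffer []byte) {
	for i := 0; i < len(buffer); i += 4 {
		binary.BigEndian.PutUint32(buffer[i:], binary.LittleEndian.Uint32(buffer[i:]))
	}
}

func main() {
	buf := []byte{0x01, 0x02, 0x03, 0x04}
	swap(buf)
	fmt.Printf("% x\n", buf) // 04 03 02 01
	swap(buf)
	fmt.Printf("% x\n", buf) // 01 02 03 04
}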
// prepare converts an ethash cache or dataset from a byte stream into the internal
// int representation. All ethash methods work with ints to avoid constant byte to
// int conversions as well as to handle both little and big endian systems.
func prepare(size uint64, r io.Reader) []uint32 {
ints := make([]uint32, size/4)
buffer := make([]byte, 4)
for i := 0; i < len(ints); i++ {
io.ReadFull(r, buffer)
ints[i] = binary.LittleEndian.Uint32(buffer)
func prepare(dest []uint32, src []byte) {
for i := 0; i < len(dest); i++ {
dest[i] = binary.LittleEndian.Uint32(src[i*4:])
}
return ints
}
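After this change prepare survives only as a test-side decoder from little-endian byte vectors to words; a minimal hypothetical use (values illustrative):

// demoPrepare assumes prepare is in scope (package pow).
func demoPrepare() []uint32 {
	raw := []byte{0x01, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00}
	words := make([]uint32, len(raw)/4)
	prepare(words, raw)
	return words // []uint32{1, 255} regardless of host byte order
}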
// fnv is an algorithm inspired by the FNV hash, which in some cases is used as
@@ -152,7 +187,7 @@ func fnvHash(mix []uint32, data []uint32) {
// generateDatasetItem combines data from 256 pseudorandomly selected cache nodes,
// and hashes that to compute a single dataset node.
func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher) []byte {
func generateDatasetItem(cache []uint32, index uint32, keccak512 hasher) []byte {
// Calculate the number of theoretical rows (we use one buffer nonetheless)
rows := uint32(len(cache) / hashWords)
@@ -163,7 +198,7 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher)
for i := 1; i < hashWords; i++ {
binary.LittleEndian.PutUint32(mix[i*4:], cache[(index%rows)*hashWords+uint32(i)])
}
mix = keccak512(mix)
keccak512(mix, mix)
// Convert the mix to uint32s to avoid constant bit shifting
intMix := make([]uint32, hashWords)
@@ -179,22 +214,39 @@ func generateDatasetItem(cache []uint32, index uint32, keccak512 crypto.Hasher)
for i, val := range intMix {
binary.LittleEndian.PutUint32(mix[i*4:], val)
}
return keccak512(mix)
keccak512(mix, mix)
return mix
}
// generateDataset generates the entire ethash dataset for mining.
func generateDataset(size uint64, cache []uint32) []byte {
//
// This method places the result into dest in machine byte order.
func generateDataset(dest []uint32, epoch uint64, cache []uint32) {
// Print some debug logs to allow analysis on low end devices
logger := log.New("size", size)
logger.Debug("Generating ethash dataset")
logger := log.New("epoch", epoch)
defer func(start time.Time) {
logger.Debug("Generated ethash dataset", "elapsed", common.PrettyDuration(time.Since(start)))
elapsed := time.Since(start)
logFn := logger.Debug
if elapsed > 3*time.Second {
logFn = logger.Info
}
logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed))
}(time.Now())
// Figure out whether the bytes need to be swapped for the machine
swapped := !isLittleEndian()
// Convert our destination slice to a byte buffer
header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest))
header.Len *= 4
header.Cap *= 4
dataset := *(*[]byte)(unsafe.Pointer(&header))
// Generate the dataset on many goroutines since it takes a while
dataset := make([]byte, size)
threads := runtime.NumCPU()
size := uint64(len(dataset))
var pend sync.WaitGroup
pend.Add(threads)
@@ -205,7 +257,7 @@ func generateDataset(size uint64, cache []uint32) []byte {
defer pend.Done()
// Create a hasher to reuse between invocations
keccak512 := crypto.Keccak512Hasher()
keccak512 := makeHasher(sha3.NewKeccak512())
// Calculate the data segment this thread should generate
batch := uint32(size / hashBytes / uint64(threads))
@@ -217,7 +269,11 @@ func generateDataset(size uint64, cache []uint32) []byte {
// Calculate the dataset segment
percent := uint32(size / hashBytes / 100)
for index := start; index < limit; index++ {
copy(dataset[index*hashBytes:], generateDatasetItem(cache, index, keccak512))
item := generateDatasetItem(cache, index, keccak512)
if swapped {
swap(item)
}
copy(dataset[index*hashBytes:], item)
if status := atomic.AddUint32(&progress, 1); status%percent == 0 {
logger.Info("Generating DAG in progress", "percentage", uint64(status*100)/(size/hashBytes))
@@ -227,8 +283,6 @@ func generateDataset(size uint64, cache []uint32) []byte {
}
// Wait for all the generators to finish and return
pend.Wait()
return dataset
}
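The parallel fill gives each CPU one contiguous batch of item indexes, and each worker byte-swaps its own output when the host is big-endian. A stripped-down sketch of the partitioning, with the per-item hashing replaced by a stand-in:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	const items = 1000 // stand-in for size / hashBytes
	threads := runtime.NumCPU()

	var pend sync.WaitGroup
	pend.Add(threads)
	for i := 0; i < threads; i++ {
		go func(id int) {
			defer pend.Done()
			// Ceiling division so the batches cover every index.
			batch := uint32((items + threads - 1) / threads)
			first := uint32(id) * batch
			limit := first + batch
			if limit > items {
				limit = items
			}
			for index := first; index < limit; index++ {
				_ = index // stand-in for generateDatasetItem + copy
			}
			fmt.Printf("worker %d covered [%d, %d)\n", id, first, limit)
		}(i)
	}
	pend.Wait()
}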
// hashimoto aggregates data from the full dataset in order to produce our final
@@ -277,7 +331,7 @@ func hashimoto(hash []byte, nonce uint64, size uint64, lookup func(index uint32)
// in-memory cache) in order to produce our final value for a particular header
// hash and nonce.
func hashimotoLight(size uint64, cache []uint32, hash []byte, nonce uint64) ([]byte, []byte) {
keccak512 := crypto.Keccak512Hasher()
keccak512 := makeHasher(sha3.NewKeccak512())
lookup := func(index uint32) []uint32 {
rawData := generateDatasetItem(cache, index, keccak512)


@@ -18,21 +18,28 @@ package pow
import (
"bytes"
"io/ioutil"
"math/big"
"os"
"reflect"
"sync"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
// Tests that verification caches can be correctly generated.
func TestCacheGeneration(t *testing.T) {
tests := []struct {
size uint64
seed []byte
epoch uint64
cache []byte
}{
{
size: 1024,
seed: make([]byte, 32),
size: 1024,
epoch: 0,
cache: hexutil.MustDecode("0x" +
"7ce2991c951f7bf4c4c1bb119887ee07871eb5339d7b97b8588e85c742de90e5bafd5bbe6ce93a134fb6be9ad3e30db99d9528a2ea7846833f52e9ca119b6b54" +
"8979480c46e19972bd0738779c932c1b43e665a2fd3122fc3ddb2691f353ceb0ed3e38b8f51fd55b6940290743563c9f8fa8822e611924657501a12aafab8a8d" +
@@ -52,8 +59,8 @@ func TestCacheGeneration(t *testing.T) {
"845f64fd8324bb85312979dead74f764c9677aab89801ad4f927f1c00f12e28f22422bb44200d1969d9ab377dd6b099dc6dbc3222e9321b2c1e84f8e2f07731c"),
},
{
size: 1024,
seed: hexutil.MustDecode("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"),
size: 1024,
epoch: 1,
cache: hexutil.MustDecode("0x" +
"1f56855d59cc5a085720899b4377a0198f1abe948d85fe5820dc0e346b7c0931b9cde8e541d751de3b2b3275d0aabfae316209d5879297d8bd99f8a033c9d4df" +
"35add1029f4e6404a022d504fb8023e42989aba985a65933b0109c7218854356f9284983c9e7de97de591828ae348b63d1fc78d8db58157344d4e06530ffd422" +
@@ -74,22 +81,28 @@
},
}
for i, tt := range tests {
if cache := generateCache(tt.size, tt.seed); !bytes.Equal(cache, tt.cache) {
t.Errorf("cache %d: content mismatch: have %x, want %x", i, cache, tt.cache)
cache := make([]uint32, tt.size/4)
generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1))
want := make([]uint32, tt.size/4)
prepare(want, tt.cache)
if !reflect.DeepEqual(cache, want) {
t.Errorf("cache %d: content mismatch: have %x, want %x", i, cache, want)
}
}
}
func TestDatasetGeneration(t *testing.T) {
tests := []struct {
epoch uint64
cacheSize uint64
cacheSeed []byte
datasetSize uint64
dataset []byte
}{
{
epoch: 0,
cacheSize: 1024,
cacheSeed: make([]byte, 32),
datasetSize: 32 * 1024,
dataset: hexutil.MustDecode("0x" +
"4bc09fbd530a041dd2ec296110a29e8f130f179c59d223f51ecce3126e8b0c74326abc2f32ccd9d7f976bd0944e3ccf8479db39343cbbffa467046ca97e2da63" +
@@ -608,11 +621,17 @@ func TestDatasetGeneration(t *testing.T) {
},
}
for i, tt := range tests {
rawCache := generateCache(tt.cacheSize, tt.cacheSeed)
cache := prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
cache := make([]uint32, tt.cacheSize/4)
generateCache(cache, tt.epoch, seedHash(tt.epoch*epochLength+1))
if dataset := generateDataset(tt.datasetSize, cache); !bytes.Equal(dataset, tt.dataset) {
t.Errorf("dataset %d: content mismatch: have %x, want %x", i, dataset, tt.dataset)
dataset := make([]uint32, tt.datasetSize/4)
generateDataset(dataset, tt.epoch, cache)
want := make([]uint32, tt.datasetSize/4)
prepare(want, tt.dataset)
if !reflect.DeepEqual(dataset, want) {
t.Errorf("dataset %d: content mismatch: have %x, want %x", i, dataset, want)
}
}
}
@@ -621,12 +640,12 @@
// datasets.
func TestHashimoto(t *testing.T) {
// Create the verification cache and mining dataset
var (
rawCache = generateCache(1024, make([]byte, 32))
cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
rawDataset = generateDataset(32*1024, cache)
dataset = prepare(uint64(len(rawDataset)), bytes.NewReader(rawDataset))
)
cache := make([]uint32, 1024/4)
generateCache(cache, 0, make([]byte, 32))
dataset := make([]uint32, 32*1024/4)
generateDataset(dataset, 0, cache)
// Create a block to verify
hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
nonce := uint64(0)
@@ -650,31 +669,77 @@ func TestHashimoto(t *testing.T) {
}
}
// Tests that caches generated on disk may be done concurrently.
func TestConcurrentDiskCacheGeneration(t *testing.T) {
// Create a temp folder to generate the caches into
cachedir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("Failed to create temporary cache dir: %v", err)
}
defer os.RemoveAll(cachedir)
// Define a heavy enough block, one from mainnet should do
block := types.NewBlockWithHeader(&types.Header{
Number: big.NewInt(3311058),
ParentHash: common.HexToHash("0xd783efa4d392943503f28438ad5830b2d5964696ffc285f338585e9fe0a37a05"),
UncleHash: common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347"),
Coinbase: common.HexToAddress("0xc0ea08a2d404d3172d2add29a45be56da40e2949"),
Root: common.HexToHash("0x77d14e10470b5850332524f8cd6f69ad21f070ce92dca33ab2858300242ef2f1"),
TxHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
ReceiptHash: common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"),
Difficulty: big.NewInt(167925187834220),
GasLimit: big.NewInt(4015682),
GasUsed: big.NewInt(0),
Time: big.NewInt(1488928920),
Extra: []byte("www.bw.com"),
MixDigest: common.HexToHash("0x3e140b0784516af5e5ec6730f2fb20cca22f32be399b9e4ad77d32541f798cd0"),
Nonce: types.EncodeNonce(0xf400cd0006070c49),
})
// Simulate multiple processes sharing the same datadir
var pend sync.WaitGroup
for i := 0; i < 3; i++ {
pend.Add(1)
go func(idx int) {
defer pend.Done()
ethash := NewFullEthash(cachedir, 0, 1, "", 0, 0)
if err := ethash.Verify(block); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
}
}(i)
}
pend.Wait()
}
// Benchmarks the cache generation performance.
func BenchmarkCacheGeneration(b *testing.B) {
for i := 0; i < b.N; i++ {
generateCache(cacheSize(1), make([]byte, 32))
cache := make([]uint32, cacheSize(1)/4)
generateCache(cache, 0, make([]byte, 32))
}
}
// Benchmarks the dataset (small) generation performance.
func BenchmarkSmallDatasetGeneration(b *testing.B) {
rawCache := generateCache(65536, make([]byte, 32))
cache := prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
cache := make([]uint32, 65536/4)
generateCache(cache, 0, make([]byte, 32))
b.ResetTimer()
for i := 0; i < b.N; i++ {
generateDataset(32*65536, cache)
dataset := make([]uint32, 32*65536/4)
generateDataset(dataset, 0, cache)
}
}
// Benchmarks the light verification performance.
func BenchmarkHashimotoLight(b *testing.B) {
var (
rawCache = generateCache(cacheSize(1), make([]byte, 32))
cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
hash = hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
)
cache := make([]uint32, cacheSize(1)/4)
generateCache(cache, 0, make([]byte, 32))
hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
b.ResetTimer()
for i := 0; i < b.N; i++ {
hashimotoLight(datasetSize(1), cache, hash, 0)
@@ -683,13 +748,14 @@ func BenchmarkHashimotoLight(b *testing.B) {
// Benchmarks the full (small) verification performance.
func BenchmarkHashimotoFullSmall(b *testing.B) {
var (
rawCache = generateCache(65536, make([]byte, 32))
cache = prepare(uint64(len(rawCache)), bytes.NewReader(rawCache))
rawDataset = generateDataset(32*65536, cache)
dataset = prepare(uint64(len(rawDataset)), bytes.NewReader(rawDataset))
hash = hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
)
cache := make([]uint32, 65536/4)
generateCache(cache, 0, make([]byte, 32))
dataset := make([]uint32, 32*65536/4)
generateDataset(dataset, 0, cache)
hash := hexutil.MustDecode("0xc9149cc0386e689d789a1c2f3d5d169a61a6218ed30e74414dc736e442ef3d1f")
b.ResetTimer()
for i := 0; i < b.N; i++ {
hashimotoFull(32*65536, dataset, hash, 0)

vendor/github.com/edsrzf/mmap-go/LICENSE (generated, vendored, new file)

@@ -0,0 +1,25 @@
Copyright (c) 2011, Evan Shaw <edsrzf@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/edsrzf/mmap-go/README.md (generated, vendored, new file)

@@ -0,0 +1,12 @@
mmap-go
=======
mmap-go is a portable mmap package for the [Go programming language](http://golang.org).
It has been tested on Linux (386, amd64), OS X, and Windows (386). It should also
work on other Unix-like platforms, but hasn't been tested with them. I'm interested
to hear about the results.
I haven't been able to add more features without adding significant complexity,
so mmap-go doesn't support mprotect, mincore, and maybe a few other things.
If you're running on a Unix-like platform and need some of these features,
I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap).

vendor/github.com/edsrzf/mmap-go/mmap.go (generated, vendored, new file)

@@ -0,0 +1,112 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file defines the common package interface and contains a little bit of
// factored out logic.
// Package mmap allows mapping files into memory. It tries to provide a simple, reasonably portable interface,
// but doesn't go out of its way to abstract away every little platform detail.
// This specifically means:
// * forked processes may or may not inherit mappings
// * a file's timestamp may or may not be updated by writes through mappings
// * specifying a size larger than the file's actual size can increase the file's size
// * If the mapped file is being modified by another process while your program's running, don't expect consistent results between platforms
package mmap
import (
"errors"
"os"
"reflect"
"unsafe"
)
const (
// RDONLY maps the memory read-only.
// Attempts to write to the MMap object will result in undefined behavior.
RDONLY = 0
// RDWR maps the memory as read-write. Writes to the MMap object will update the
// underlying file.
RDWR = 1 << iota
// COPY maps the memory as copy-on-write. Writes to the MMap object will affect
// memory, but the underlying file will remain unchanged.
COPY
// If EXEC is set, the mapped memory is marked as executable.
EXEC
)
const (
// If the ANON flag is set, the mapped memory will not be backed by a file.
ANON = 1 << iota
)
// MMap represents a file mapped into memory.
type MMap []byte
// Map maps an entire file into memory.
// If ANON is set in flags, f is ignored.
func Map(f *os.File, prot, flags int) (MMap, error) {
return MapRegion(f, -1, prot, flags, 0)
}
// MapRegion maps part of a file into memory.
// The offset parameter must be a multiple of the system's page size.
// If length < 0, the entire file will be mapped.
// If ANON is set in flags, f is ignored.
func MapRegion(f *os.File, length int, prot, flags int, offset int64) (MMap, error) {
var fd uintptr
if flags&ANON == 0 {
fd = uintptr(f.Fd())
if length < 0 {
fi, err := f.Stat()
if err != nil {
return nil, err
}
length = int(fi.Size())
}
} else {
if length <= 0 {
return nil, errors.New("anonymous mapping requires non-zero length")
}
fd = ^uintptr(0)
}
return mmap(length, uintptr(prot), uintptr(flags), fd, offset)
}
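For reference, typical read-only use of the vendored package, matching how memoryMap in the pow changes above consumes it (the input path is illustrative):

package main

import (
	"fmt"
	"os"

	mmap "github.com/edsrzf/mmap-go"
)

func main() {
	f, err := os.Open("/etc/hosts") // any readable, non-empty file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	m, err := mmap.Map(f, mmap.RDONLY, 0)
	if err != nil {
		panic(err)
	}
	defer m.Unmap()

	fmt.Printf("mapped %d bytes; first byte: %q\n", len(m), m[0])
}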
func (m *MMap) header() *reflect.SliceHeader {
return (*reflect.SliceHeader)(unsafe.Pointer(m))
}
// Lock keeps the mapped region in physical memory, ensuring that it will not be
// swapped out.
func (m MMap) Lock() error {
dh := m.header()
return lock(dh.Data, uintptr(dh.Len))
}
// Unlock reverses the effect of Lock, allowing the mapped region to potentially
// be swapped out.
// If m is already unlocked, an error will result.
func (m MMap) Unlock() error {
dh := m.header()
return unlock(dh.Data, uintptr(dh.Len))
}
// Flush synchronizes the mapping's contents to the file's contents on disk.
func (m MMap) Flush() error {
dh := m.header()
return flush(dh.Data, uintptr(dh.Len))
}
// Unmap deletes the memory mapped region, flushes any remaining changes, and sets
// m to nil.
// Trying to read or write any remaining references to m after Unmap is called will
// result in undefined behavior.
// Unmap should only be called on the slice value that was originally returned from
// a call to Map. Calling Unmap on a derived slice may cause errors.
func (m *MMap) Unmap() error {
dh := m.header()
err := unmap(dh.Data, uintptr(dh.Len))
*m = nil
return err
}

vendor/github.com/edsrzf/mmap-go/mmap_unix.go (generated, vendored, new file)

@@ -0,0 +1,67 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris netbsd
package mmap
import (
"syscall"
)
func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
flags := syscall.MAP_SHARED
prot := syscall.PROT_READ
switch {
case inprot&COPY != 0:
prot |= syscall.PROT_WRITE
flags = syscall.MAP_PRIVATE
case inprot&RDWR != 0:
prot |= syscall.PROT_WRITE
}
if inprot&EXEC != 0 {
prot |= syscall.PROT_EXEC
}
if inflags&ANON != 0 {
flags |= syscall.MAP_ANON
}
b, err := syscall.Mmap(int(fd), off, len, prot, flags)
if err != nil {
return nil, err
}
return b, nil
}
func flush(addr, len uintptr) error {
_, _, errno := syscall.Syscall(_SYS_MSYNC, addr, len, _MS_SYNC)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func lock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unlock(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNLOCK, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}
func unmap(addr, len uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_MUNMAP, addr, len, 0)
if errno != 0 {
return syscall.Errno(errno)
}
return nil
}

vendor/github.com/edsrzf/mmap-go/mmap_windows.go (generated, vendored, new file)

@@ -0,0 +1,125 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
import (
"errors"
"os"
"sync"
"syscall"
)
// mmap on Windows is a two-step process.
// First, we call CreateFileMapping to get a handle.
// Then, we call MapViewOfFile to get an actual pointer into memory.
// Because we want to emulate a POSIX-style mmap, we don't want to expose
// the handle -- only the pointer. We also want to return only a byte slice,
// not a struct, so it's convenient to manipulate.
// We keep this map so that we can get back the original handle from the memory address.
var handleLock sync.Mutex
var handleMap = map[uintptr]syscall.Handle{}
func mmap(len int, prot, flags, hfile uintptr, off int64) ([]byte, error) {
flProtect := uint32(syscall.PAGE_READONLY)
dwDesiredAccess := uint32(syscall.FILE_MAP_READ)
switch {
case prot&COPY != 0:
flProtect = syscall.PAGE_WRITECOPY
dwDesiredAccess = syscall.FILE_MAP_COPY
case prot&RDWR != 0:
flProtect = syscall.PAGE_READWRITE
dwDesiredAccess = syscall.FILE_MAP_WRITE
}
if prot&EXEC != 0 {
flProtect <<= 4
dwDesiredAccess |= syscall.FILE_MAP_EXECUTE
}
// The maximum size is the area of the file, starting from 0,
// that we wish to allow to be mappable. It is the sum of
// the length the user requested, plus the offset where that length
// is starting from. This does not map the data into memory.
maxSizeHigh := uint32((off + int64(len)) >> 32)
maxSizeLow := uint32((off + int64(len)) & 0xFFFFFFFF)
// TODO: Do we need to set some security attributes? It might help portability.
h, errno := syscall.CreateFileMapping(syscall.Handle(hfile), nil, flProtect, maxSizeHigh, maxSizeLow, nil)
if h == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
// Actually map a view of the data into memory. The view's size
// is the length the user requested.
fileOffsetHigh := uint32(off >> 32)
fileOffsetLow := uint32(off & 0xFFFFFFFF)
addr, errno := syscall.MapViewOfFile(h, dwDesiredAccess, fileOffsetHigh, fileOffsetLow, uintptr(len))
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
handleLock.Lock()
handleMap[addr] = h
handleLock.Unlock()
m := MMap{}
dh := m.header()
dh.Data = addr
dh.Len = len
dh.Cap = dh.Len
return m, nil
}
func flush(addr, len uintptr) error {
errno := syscall.FlushViewOfFile(addr, len)
if errno != nil {
return os.NewSyscallError("FlushViewOfFile", errno)
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
errno = syscall.FlushFileBuffers(handle)
return os.NewSyscallError("FlushFileBuffers", errno)
}
func lock(addr, len uintptr) error {
errno := syscall.VirtualLock(addr, len)
return os.NewSyscallError("VirtualLock", errno)
}
func unlock(addr, len uintptr) error {
errno := syscall.VirtualUnlock(addr, len)
return os.NewSyscallError("VirtualUnlock", errno)
}
func unmap(addr, len uintptr) error {
flush(addr, len)
// Lock the UnmapViewOfFile along with the handleMap deletion.
// As soon as we unmap the view, the OS is free to give the
// same addr to another new map. We don't want another goroutine
// to insert and remove the same addr into handleMap while
// we're trying to remove our old addr/handle pair.
handleLock.Lock()
defer handleLock.Unlock()
err := syscall.UnmapViewOfFile(addr)
if err != nil {
return err
}
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
delete(handleMap, addr)
e := syscall.CloseHandle(syscall.Handle(handle))
return os.NewSyscallError("CloseHandle", e)
}

vendor/github.com/edsrzf/mmap-go/msync_netbsd.go (generated, vendored, new file)

@@ -0,0 +1,8 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmap
const _SYS_MSYNC = 277
const _MS_SYNC = 0x04

vendor/github.com/edsrzf/mmap-go/msync_unix.go (generated, vendored, new file)

@@ -0,0 +1,14 @@
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux openbsd solaris
package mmap
import (
"syscall"
)
const _SYS_MSYNC = syscall.SYS_MSYNC
const _MS_SYNC = syscall.MS_SYNC

vendor/vendor.json (vendored)

@@ -34,6 +34,12 @@
"revision": "346938d642f2ec3594ed81d874461961cd0faa76",
"revisionTime": "2016-10-29T20:57:26Z"
},
{
"checksumSHA1": "zYnPsNAVm1/ViwCkN++dX2JQhBo=",
"path": "github.com/edsrzf/mmap-go",
"revision": "935e0e8a636ca4ba70b713f3e38a19e1b77739e8",
"revisionTime": "2016-05-12T03:30:02Z"
},
{
"checksumSHA1": "7oFpbmDfGobwKsFLIf6wMUvVoKw=",
"path": "github.com/fatih/color",