all: fix typos in comments (#28881)
parent 8ec638dc5e
commit 8fd43c8013
@@ -29,7 +29,7 @@ import (
 )
 
 // The ABI holds information about a contract's context and available
-// invokable methods. It will allow you to type check function calls and
+// invocable methods. It will allow you to type check function calls and
 // packs data accordingly.
 type ABI struct {
 Constructor Method
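For context, the ABI type touched above is used roughly as follows: parse a JSON ABI definition, then let Pack type-check and encode a call. A minimal sketch (the method name and JSON definition are invented for illustration):

package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	const def = `[{"type":"function","name":"transfer","inputs":[{"name":"to","type":"address"},{"name":"amount","type":"uint256"}]}]`
	parsed, err := abi.JSON(strings.NewReader(def))
	if err != nil {
		panic(err)
	}
	// Pack type-checks the arguments against the method signature and
	// returns the 4-byte selector followed by the ABI-encoded arguments.
	data, err := parsed.Pack("transfer", common.HexToAddress("0x01"), big.NewInt(1))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", data)
}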
@@ -241,7 +241,7 @@ func (hub *Hub) refreshWallets() {
 card.Disconnect(pcsc.LeaveCard)
 continue
 }
-// Card connected, start tracking in amongs the wallets
+// Card connected, start tracking among the wallets
 hub.wallets[reader] = wallet
 events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
 }
@@ -75,7 +75,7 @@ Example:
 },
 {
 "type": "Info",
-"message": "User should see this aswell"
+"message": "User should see this as well"
 }
 ],
 "meta": {
@@ -1673,7 +1673,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
 // The chain importer is starting and stopping trie prefetchers. If a bad
 // block or other error is hit however, an early return may not properly
 // terminate the background threads. This defer ensures that we clean up
-// and dangling prefetcher, without defering each and holding on live refs.
+// and dangling prefetcher, without deferring each and holding on live refs.
 if activeState != nil {
 activeState.StopPrefetcher()
 }
@@ -894,7 +894,7 @@ func getChunk(size int, b int) []byte {
 }
 
 // TODO (?)
-// - test that if we remove several head-files, aswell as data last data-file,
+// - test that if we remove several head-files, as well as data last data-file,
 // the index is truncated accordingly
 // Right now, the freezer would fail on these conditions:
 // 1. have data files d0, d1, d2, d3
@@ -121,7 +121,7 @@ func prune(snaptree *snapshot.Tree, root common.Hash, maindb ethdb.Database, sta
 // the trie nodes(and codes) belong to the active state will be filtered
 // out. A very small part of stale tries will also be filtered because of
 // the false-positive rate of bloom filter. But the assumption is held here
-// that the false-positive is low enough(~0.05%). The probablity of the
+// that the false-positive is low enough(~0.05%). The probability of the
 // dangling node is the state root is super low. So the dangling nodes in
 // theory will never ever be visited again.
 var (
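As background for the ~0.05% figure quoted in this comment: the textbook false-positive estimate for a Bloom filter with m bits, k hash functions and n inserted items is (1 - e^(-k*n/m))^k. A small sketch of that arithmetic (the sizing parameters are illustrative, not the pruner's actual configuration):

package main

import (
	"fmt"
	"math"
)

// falsePositiveRate is the classic Bloom filter approximation
// (1 - e^(-k*n/m))^k for m bits, k hash functions, n inserted items.
func falsePositiveRate(m, k, n float64) float64 {
	return math.Pow(1-math.Exp(-k*n/m), k)
}

func main() {
	// Roughly 25 bits per item with 4 hashes lands near the quoted ~0.05%.
	fmt.Printf("%.4f%%\n", 100*falsePositiveRate(25e6, 4, 1e6)) // 0.0478%
}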
@@ -43,7 +43,7 @@ var (
 aggregatorMemoryLimit = uint64(4 * 1024 * 1024)
 
 // aggregatorItemLimit is an approximate number of items that will end up
-// in the agregator layer before it's flushed out to disk. A plain account
+// in the aggregator layer before it's flushed out to disk. A plain account
 // weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot
 // 0B (+hash). Slots are mostly set/unset in lockstep, so that average at
 // 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a
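(For scale, working just from the constants quoted in this hunk: an average entry of 47 B against the 4 MiB aggregatorMemoryLimit implies roughly 4 * 1024 * 1024 / 47 ≈ 89,000 items in the aggregator layer before a flush.)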
@@ -139,7 +139,7 @@ func TestDiskMerge(t *testing.T) {
 // Retrieve all the data through the disk layer and validate it
 base = snaps.Snapshot(diffRoot)
 if _, ok := base.(*diskLayer); !ok {
-t.Fatalf("update not flattend into the disk layer")
+t.Fatalf("update not flattened into the disk layer")
 }
 
 // assertAccount ensures that an account matches the given blob.
@@ -362,7 +362,7 @@ func TestDiskPartialMerge(t *testing.T) {
 // Retrieve all the data through the disk layer and validate it
 base = snaps.Snapshot(diffRoot)
 if _, ok := base.(*diskLayer); !ok {
-t.Fatalf("test %d: update not flattend into the disk layer", i)
+t.Fatalf("test %d: update not flattened into the disk layer", i)
 }
 assertAccount(accNoModNoCache, accNoModNoCache[:])
 assertAccount(accNoModCache, accNoModCache[:])
@@ -237,7 +237,7 @@ func testIterativeStateSync(t *testing.T, count int, commit bool, bypath bool, s
 id := trie.StorageTrieID(srcRoot, common.BytesToHash(node.syncPath[0]), acc.Root)
 stTrie, err := trie.New(id, ndb)
 if err != nil {
-t.Fatalf("failed to retriev storage trie for path %x: %v", node.syncPath[1], err)
+t.Fatalf("failed to retrieve storage trie for path %x: %v", node.syncPath[1], err)
 }
 data, _, err := stTrie.GetNode(node.syncPath[1])
 if err != nil {
@@ -458,7 +458,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 tx := new(types.Transaction)
 if err := rlp.DecodeBytes(blob, tx); err != nil {
 // This path is impossible unless the disk data representation changes
-// across restarts. For that ever unprobable case, recover gracefully
+// across restarts. For that ever improbable case, recover gracefully
 // by ignoring this data entry.
 log.Error("Failed to decode blob pool entry", "id", id, "err", err)
 return err
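The rlp.DecodeBytes pattern guarded here is the standard go-ethereum way to rehydrate a stored value. A self-contained sketch of the encode/decode round trip (the Item type is invented for illustration):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

// Item is a hypothetical struct standing in for the pool's stored entries.
type Item struct {
	ID    uint64
	Value []byte
}

func main() {
	blob, err := rlp.EncodeToBytes(&Item{ID: 1, Value: []byte("blob")})
	if err != nil {
		panic(err)
	}
	// DecodeBytes fails if blob was written under a different encoding,
	// which is exactly the "improbable case" the comment above guards against.
	item := new(Item)
	if err := rlp.DecodeBytes(blob, item); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *item)
}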
@@ -479,7 +479,7 @@ func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
 sender, err := p.signer.Sender(tx)
 if err != nil {
 // This path is impossible unless the signature validity changes across
-// restarts. For that ever unprobable case, recover gracefully by ignoring
+// restarts. For that ever improbable case, recover gracefully by ignoring
 // this data entry.
 log.Error("Failed to recover blob tx sender", "id", id, "hash", tx.Hash(), "err", err)
 return err
@@ -749,7 +749,7 @@ func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint6
 // offload removes a tracked blob transaction from the pool and moves it into the
 // limbo for tracking until finality.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
@@ -1201,7 +1201,7 @@ func (p *BlobPool) Get(hash common.Hash) *types.Transaction {
 }
 
 // Add inserts a set of blob transactions into the pool if they pass validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error {
 var (
 adds = make([]*types.Transaction, 0, len(txs))
@@ -1221,7 +1221,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
 }
 
 // Add inserts a new blob transaction into the pool if it passes validation (both
-// consensus validity and pool restictions).
+// consensus validity and pool restrictions).
 func (p *BlobPool) add(tx *types.Transaction) (err error) {
 // The blob pool blocks on adding a transaction. This is because blob txs are
 // only even pulled form the network, so this method will act as the overload
@@ -635,7 +635,7 @@ func TestOpenDrops(t *testing.T) {
 
 // Tests that transactions loaded from disk are indexed correctly.
 //
-// - 1. Transactions must be groupped by sender, sorted by nonce
+// - 1. Transactions must be grouped by sender, sorted by nonce
 // - 2. Eviction thresholds are calculated correctly for the sequences
 // - 3. Balance usage of an account is totals across all transactions
 func TestOpenIndex(t *testing.T) {
@@ -649,7 +649,7 @@ func TestOpenIndex(t *testing.T) {
 store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
 
 // Insert a sequence of transactions with varying price points to check that
-// the cumulative minimumw will be maintained.
+// the cumulative minimum will be maintained.
 var (
 key, _ = crypto.GenerateKey()
 addr = crypto.PubkeyToAddress(key.PublicKey)
@@ -1248,7 +1248,7 @@ func TestAdd(t *testing.T) {
 keys[acc], _ = crypto.GenerateKey()
 addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
 
-// Seed the state database with this acocunt
+// Seed the state database with this account
 statedb.AddBalance(addrs[acc], new(uint256.Int).SetUint64(seed.balance))
 statedb.SetNonce(addrs[acc], seed.nonce)
 
@@ -53,7 +53,7 @@ func newLimbo(datadir string) (*limbo, error) {
 index: make(map[common.Hash]uint64),
 groups: make(map[uint64]map[uint64]common.Hash),
 }
-// Index all limboed blobs on disk and delete anything inprocessable
+// Index all limboed blobs on disk and delete anything unprocessable
 var fails []uint64
 index := func(id uint64, size uint32, data []byte) {
 if l.parseBlob(id, data) != nil {
@@ -89,7 +89,7 @@ func (l *limbo) parseBlob(id uint64, data []byte) error {
 item := new(limboBlob)
 if err := rlp.DecodeBytes(data, item); err != nil {
 // This path is impossible unless the disk data representation changes
-// across restarts. For that ever unprobable case, recover gracefully
+// across restarts. For that ever improbable case, recover gracefully
 // by ignoring this data entry.
 log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
 return err
@@ -172,7 +172,7 @@ func (l *limbo) pull(tx common.Hash) (*types.Transaction, error) {
 // update changes the block number under which a blob transaction is tracked. This
 // method should be used when a reorg changes a transaction's inclusion block.
 //
-// The method may log errors for various unexpcted scenarios but will not return
+// The method may log errors for various unexpected scenarios but will not return
 // any of it since there's no clear error case. Some errors may be due to coding
 // issues, others caused by signers mining MEV stuff or swapping transactions. In
 // all cases, the pool needs to continue operating.
@@ -75,7 +75,7 @@ type AddressReserver func(addr common.Address, reserve bool) error
 // production, this interface defines the common methods that allow the primary
 // transaction pool to manage the subpools.
 type SubPool interface {
-// Filter is a selector used to decide whether a transaction whould be added
+// Filter is a selector used to decide whether a transaction would be added
 // to this particular subpool.
 Filter(tx *types.Transaction) bool
 
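As an illustration of Filter acting as a selector: a blob subpool would plausibly accept only type-3 (blob) transactions and leave everything else to other subpools. A hedged sketch, not the actual blobpool implementation:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// filter mirrors what a blob subpool's Filter might do: select only
// blob-carrying transactions.
func filter(tx *types.Transaction) bool {
	return tx.Type() == types.BlobTxType
}

func main() {
	legacy := types.NewTx(&types.LegacyTx{})
	fmt.Println(filter(legacy)) // false: a legacy tx is not a blob tx
}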
@@ -43,7 +43,7 @@ func TestEIP155Signing(t *testing.T) {
 t.Fatal(err)
 }
 if from != addr {
-t.Errorf("exected from and address to be equal. Got %x want %x", from, addr)
+t.Errorf("expected from and address to be equal. Got %x want %x", from, addr)
 }
 }
 
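For context, the round trip this test exercises is: sign with an EIP-155 signer, then recover the sender and compare it to the key's address. A sketch with made-up chain ID and transaction fields:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)

	signer := types.NewEIP155Signer(big.NewInt(1)) // chain ID 1, illustrative
	tx := types.NewTx(&types.LegacyTx{Nonce: 0, To: &addr, Gas: 21000, GasPrice: big.NewInt(1), Value: big.NewInt(0)})

	signed, err := types.SignTx(tx, signer, key)
	if err != nil {
		panic(err)
	}
	from, err := types.Sender(signer, signed)
	if err != nil {
		panic(err)
	}
	fmt.Println(from == addr) // true: recovered sender matches the key's address
}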
@@ -223,7 +223,7 @@ func BenchmarkPrecompiledRipeMD(bench *testing.B) {
 benchmarkPrecompiled("03", t, bench)
 }
 
-// Benchmarks the sample inputs from the identiy precompile.
+// Benchmarks the sample inputs from the identity precompile.
 func BenchmarkPrecompiledIdentity(bench *testing.B) {
 t := precompiledTest{
 Input: "38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e000000000000000000000000000000000000000000000000000000000000001b38d18acb67d25c8bb9942764b62f18e17054f66a817bd4295423adf9ed98873e789d1dd423d25f0772d2748d60f7e4b81bb14d086eba8e8e8efb6dcff8a4ae02",
@@ -147,7 +147,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) (
 debug = in.evm.Config.Tracer != nil
 )
 // Don't move this deferred function, it's placed before the capturestate-deferred method,
-// so that it get's executed _after_: the capturestate needs the stacks before
+// so that it gets executed _after_: the capturestate needs the stacks before
 // they are returned to the pools
 defer func() {
 returnStack(stack)
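The ordering this comment relies on is Go's LIFO execution of deferred functions: the defer registered first runs last. A standalone illustration:

package main

import "fmt"

func main() {
	// Deferred functions run in reverse registration order, so the stack
	// pool cleanup registered first runs after the state capture registered
	// later - exactly the property the interpreter comment depends on.
	defer fmt.Println("return stacks to the pool (runs second)")
	defer fmt.Println("capture state with live stacks (runs first)")
}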
@@ -22,7 +22,7 @@ import (
 "github.com/stretchr/testify/require"
 )
 
-// TestJumpTableCopy tests that deep copy is necessery to prevent modify shared jump table
+// TestJumpTableCopy tests that deep copy is necessary to prevent modify shared jump table
 func TestJumpTableCopy(t *testing.T) {
 tbl := newMergeInstructionSet()
 require.Equal(t, uint64(0), tbl[SLOAD].constantGas)
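The pitfall behind this test: the EVM jump table is an array of pointers to operation structs, so assigning the array copies only the pointers and mutations still hit the shared operations. A self-contained sketch of the hazard (op is an invented stand-in for the real operation type):

package main

import "fmt"

type op struct{ constantGas uint64 }

func main() {
	shared := [2]*op{{constantGas: 0}, {constantGas: 3}}

	// A plain assignment copies the array, but both arrays still point at
	// the same op values - mutating the "copy" changes the original too.
	shallow := shared
	shallow[0].constantGas = 100
	fmt.Println(shared[0].constantGas) // 100: the shared op was modified

	// A deep copy clones each op, so later mutations stay local.
	var deep [2]*op
	for i, o := range shared {
		cloned := *o
		deep[i] = &cloned
	}
	deep[0].constantGas = 0
	fmt.Println(shared[0].constantGas) // still 100: original untouched
}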
@@ -27,7 +27,7 @@ import (
 // If z is equal to one the point is considered as in affine form.
 type PointG2 [3]fe2
 
-// Set copies valeus of one point to another.
+// Set copies values of one point to another.
 func (p *PointG2) Set(p2 *PointG2) *PointG2 {
 p[0].set(&p2[0])
 p[1].set(&p2[1])
@@ -219,7 +219,7 @@
 return this.finalize(result);
 },
 
-// finalize recreates a call object using the final desired field oder for json
+// finalize recreates a call object using the final desired field order for json
 // serialization. This is a nicety feature to pass meaningfully ordered results
 // to users who don't interpret it, just display it.
 finalize: function(call) {
@@ -124,9 +124,9 @@ func TestMemCopying(t *testing.T) {
 {0, 100, 0, "", 0}, // No need to pad (0 size)
 {100, 50, 100, "", 100}, // Should pad 100-150
 {100, 50, 5, "", 5}, // Wanted range fully within memory
-{100, -50, 0, "offset or size must not be negative", 0}, // Errror
-{0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Errror
-{10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Errror
+{100, -50, 0, "offset or size must not be negative", 0}, // Error
+{0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Error
+{10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Error
 
 } {
 mem := vm.NewMemory()
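What this table exercises is a bounds-checked "copy with zero padding": reads past the end of memory return zeroes, while negative ranges or ranges that would force more than ~1 MiB of padding error out. A rough sketch of such a helper (the function and its name are invented for illustration; only the error strings and the cap are sized to match the rows above):

package main

import (
	"errors"
	"fmt"
)

const maxPadding = 1024 * 1024 // cap on how much zero padding a read may force

// getPadded copies mem[offset:offset+size], zero-padding any part of the
// range beyond the current memory, and rejects negative or oversized requests.
func getPadded(mem []byte, offset, size int64) ([]byte, error) {
	if offset < 0 || size < 0 {
		return nil, errors.New("offset or size must not be negative")
	}
	if pad := offset + size - int64(len(mem)); pad > maxPadding {
		return nil, fmt.Errorf("reached limit for padding memory slice: %d", pad)
	}
	out := make([]byte, size) // zero-filled by default
	if offset < int64(len(mem)) {
		copy(out, mem[offset:])
	}
	return out, nil
}

func main() {
	mem := make([]byte, 100)
	out, err := getPadded(mem, 50, 100) // pads bytes 100-150 with zeroes
	fmt.Println(len(out), err)          // 100 <nil>
}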
@@ -2031,7 +2031,7 @@ var fromAscii = function(str) {
 *
 * @method transformToFullName
 * @param {Object} json-abi
-* @return {String} full fnction/event name
+* @return {String} full function/event name
 */
 var transformToFullName = function (json) {
 if (json.name.indexOf('(') !== -1) {
@@ -2361,7 +2361,7 @@ var isFunction = function (object) {
 };
 
 /**
-* Returns true if object is Objet, otherwise false
+* Returns true if object is Object, otherwise false
 *
 * @method isObject
 * @param {Object}
@@ -2757,7 +2757,7 @@ var Batch = function (web3) {
 * Should be called to add create new request to batch request
 *
 * @method add
-* @param {Object} jsonrpc requet object
+* @param {Object} jsonrpc request object
 */
 Batch.prototype.add = function (request) {
 this.requests.push(request);
@@ -4559,7 +4559,7 @@ Iban.createIndirect = function (options) {
 };
 
 /**
-* Thos method should be used to check if given string is valid iban object
+* This method should be used to check if given string is valid iban object
 *
 * @method isValid
 * @param {String} iban string
@@ -6708,7 +6708,7 @@ var exchangeAbi = require('../contracts/SmartExchange.json');
 * @method transfer
 * @param {String} from
 * @param {String} to iban
-* @param {Value} value to be tranfered
+* @param {Value} value to be transferred
 * @param {Function} callback, callback
 */
 var transfer = function (eth, from, to, value, callback) {
@@ -6738,7 +6738,7 @@ var transfer = function (eth, from, to, value, callback) {
 * @method transferToAddress
 * @param {String} from
 * @param {String} to
-* @param {Value} value to be tranfered
+* @param {Value} value to be transferred
 * @param {Function} callback, callback
 */
 var transferToAddress = function (eth, from, to, value, callback) {
@@ -7092,7 +7092,7 @@ module.exports = transfer;
 /**
 * Initializes a newly created cipher.
 *
-* @param {number} xformMode Either the encryption or decryption transormation mode constant.
+* @param {number} xformMode Either the encryption or decryption transformation mode constant.
 * @param {WordArray} key The key.
 * @param {Object} cfg (Optional) The configuration options to use for this operation.
 *
@@ -9446,7 +9446,7 @@ module.exports = transfer;
 var M_offset_14 = M[offset + 14];
 var M_offset_15 = M[offset + 15];
 
-// Working varialbes
+// Working variables
 var a = H[0];
 var b = H[1];
 var c = H[2];
@@ -74,7 +74,7 @@ func (g *StandardGauge) Update(v int64) {
 g.value.Store(v)
 }
 
-// Update updates the gauge's value if v is larger then the current valie.
+// Update updates the gauge's value if v is larger then the current value.
 func (g *StandardGauge) UpdateIfGt(v int64) {
 for {
 exist := g.value.Load()
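The loop that follows in the source is the standard atomic compare-and-swap pattern. A self-contained sketch of how such an UpdateIfGt plausibly completes (the surrounding type is simplified here, not the real StandardGauge):

package main

import (
	"fmt"
	"sync/atomic"
)

type gauge struct{ value atomic.Int64 }

// UpdateIfGt stores v only if it exceeds the current value, retrying when
// another goroutine races in between the load and the swap.
func (g *gauge) UpdateIfGt(v int64) {
	for {
		exist := g.value.Load()
		if exist >= v {
			return // current value is already at least v; nothing to do
		}
		if g.value.CompareAndSwap(exist, v) {
			return // swap succeeded without interference
		}
		// CAS failed: another goroutine updated the value; re-check and retry.
	}
}

func main() {
	var g gauge
	g.UpdateIfGt(5)
	g.UpdateIfGt(3)             // ignored: 3 < 5
	fmt.Println(g.value.Load()) // 5
}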
@@ -888,7 +888,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn
 
 // generateParams wraps various of settings for generating sealing task.
 type generateParams struct {
-timestamp uint64 // The timstamp for sealing task
+timestamp uint64 // The timestamp for sealing task
 forceTime bool // Flag whether the given timestamp is immutable or not
 parentHash common.Hash // Parent block hash, empty means the latest chain head
 coinbase common.Address // The fee recipient address for including transaction
@@ -172,7 +172,7 @@ type SimNode struct {
 registerOnce sync.Once
 }
 
-// Close closes the underlaying node.Node to release
+// Close closes the underlying node.Node to release
 // acquired resources.
 func (sn *SimNode) Close() error {
 return sn.node.Close()
@@ -631,7 +631,7 @@ func (api *SignerAPI) SignGnosisSafeTx(ctx context.Context, signerAddress common
 }
 }
 typedData := gnosisTx.ToTypedData()
-// might aswell error early.
+// might as well error early.
 // we are expected to sign. If our calculated hash does not match what they want,
 // The gnosis safetx input contains a 'safeTxHash' which is the expected safeTxHash that
 sighash, _, err := apitypes.TypedDataAndHash(typedData)
@@ -389,7 +389,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
 } else {
 if bytes.Compare(cld.Key, key[pos:]) > 0 {
 // The key of fork shortnode is greater than the
-// path(it belongs to the range), unset the entrie
+// path(it belongs to the range), unset the entire
 // branch. The parent must be a fullnode.
 fn := parent.(*fullNode)
 fn.Children[key[pos-1]] = nil
@@ -333,7 +333,7 @@ func TestLargeValue(t *testing.T) {
 trie.Hash()
 }
 
-// TestRandomCases tests som cases that were found via random fuzzing
+// TestRandomCases tests some cases that were found via random fuzzing
 func TestRandomCases(t *testing.T) {
 var rt = []randTestStep{
 {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0