Mirror of https://github.com/ethereum/solidity, synced 2023-10-03 13:03:40 +00:00

Commit 37e7f1f10d (parent 342ca94866)

Do not copy reference types to memory in-place.
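Editorial note: the core of this commit is that storing a memory reference type through CompilerUtils::storeInMemoryDynamic no longer copies the referenced data in place; only the 32-byte data pointer is written, and callers that need the data itself now call ArrayUtils::copyArrayToMemory explicitly. The following condensed sketch of the new storeInMemoryDynamic is assembled from the CompilerUtils.cpp hunk further down; the comments are editorial annotations, not part of the committed source.

void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries)
{
	if (auto ref = dynamic_cast<ReferenceType const*>(&_type))
	{
		// Reference types are no longer copied in place: only memory references
		// are allowed here, and only their 32-byte data pointer is stored.
		solAssert(ref->location() == DataLocation::Memory, "");
		storeInMemoryDynamic(IntegerType(256), _padToWordBoundaries);
	}
	else
	{
		// Value types are stored (and padded) exactly as before.
		unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries);
		if (numBytes > 0)
		{
			solAssert(
				_type.getSizeOnStack() == 1,
				"Memory store of types with stack size != 1 not implemented."
			);
			m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
			m_context << u256(numBytes) << eth::Instruction::ADD;
		}
	}
}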
ArrayUtils.cpp (110 changed lines)
@@ -39,6 +39,11 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons

// stack layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top)
solAssert(_targetType.location() == DataLocation::Storage, "");
if (_sourceType.location() == DataLocation::Memory)
solAssert(
_sourceType.getBaseType()->isValueType(),
"Copying arrays of non-value-types to storage not yet implemented."
);

IntegerType uint256(256);
Type const* targetBaseType = _targetType.isByteArray() ? &uint256 : &(*_targetType.getBaseType());
@@ -235,8 +240,9 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
{
solAssert(
_sourceType.getBaseType()->getCalldataEncodedSize() > 0,
"Nested arrays not yet implemented here."
"Nested dynamic arrays not implemented here."
);
CompilerUtils utils(m_context);
unsigned baseSize = 1;
if (!_sourceType.isByteArray())
// We always pad the elements, regardless of _padToWordBoundaries.

@@ -246,7 +252,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
{
if (!_sourceType.isDynamicallySized())
m_context << _sourceType.getLength();
if (_sourceType.getBaseType()->getCalldataEncodedSize() > 1)
if (baseSize > 1)
m_context << u256(baseSize) << eth::Instruction::MUL;
// stack: target source_offset source_len
m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;

@@ -257,8 +263,36 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
}
else if (_sourceType.location() == DataLocation::Memory)
{
// memcpy using the built-in contract
retrieveLength(_sourceType);
// stack: target source length
if (!_sourceType.getBaseType()->isValueType())
{
// copy using a loop
m_context << u256(0) << eth::Instruction::SWAP3;
// stack: counter source length target
auto repeat = m_context.newTag();
m_context << repeat;
m_context << eth::Instruction::DUP2 << eth::Instruction::DUP5;
m_context << eth::Instruction::LT << eth::Instruction::ISZERO;
auto loopEnd = m_context.appendConditionalJump();
m_context << eth::Instruction::DUP3 << eth::Instruction::DUP5;
accessIndex(_sourceType, false);
MemoryItem(m_context, *_sourceType.getBaseType(), true).retrieveValue(SourceLocation(), true);
if (auto baseArray = dynamic_cast<ArrayType const*>(_sourceType.getBaseType().get()))
copyArrayToMemory(*baseArray, _padToWordBoundaries);
else
utils.storeInMemoryDynamic(*_sourceType.getBaseType());
m_context << eth::Instruction::SWAP3 << u256(1) << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP3;
m_context.appendJumpTo(repeat);
m_context << loopEnd;
m_context << eth::Instruction::SWAP3;
utils.popStackSlots(3);
// stack: updated_target_pos
return;
}

// memcpy using the built-in contract
if (_sourceType.isDynamicallySized())
{
// change pointer to data part

@@ -271,7 +305,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
// stack: <target> <source> <size>
//@TODO do not use ::CALL if less than 32 bytes?
m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::DUP4;
CompilerUtils(m_context).memoryCopy();
utils.memoryCopy();

m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
// stack: <target> <size>

@@ -345,7 +379,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
{
// actual array data is stored at SHA3(storage_offset)
m_context << eth::Instruction::SWAP1;
CompilerUtils(m_context).computeHashStatic();
utils.computeHashStatic();
m_context << eth::Instruction::SWAP1;
}

@@ -375,7 +409,10 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
else
m_context << eth::Instruction::DUP2 << u256(0);
StorageItem(m_context, *_sourceType.getBaseType()).retrieveValue(SourceLocation(), true);
CompilerUtils(m_context).storeInMemoryDynamic(*_sourceType.getBaseType());
if (auto baseArray = dynamic_cast<ArrayType const*>(_sourceType.getBaseType().get()))
copyArrayToMemory(*baseArray, _padToWordBoundaries);
else
utils.storeInMemoryDynamic(*_sourceType.getBaseType());
// increment storage_data_offset and byte offset
if (haveByteOffset)
incrementByteOffset(storageBytes, 2, 3);

@@ -387,7 +424,8 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
}
}
// check for loop condition
m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4) << eth::Instruction::GT;
m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4);
m_context << eth::Instruction::GT;
m_context.appendConditionalJumpTo(loopStart);
// stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
if (haveByteOffset)
@@ -597,12 +635,14 @@ void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) con
}
else
{
solAssert(
_arrayType.getBaseType()->getCalldataEncodedSize() > 0,
"Copying nested dynamic arrays not yet implemented."
);
if (!_arrayType.isByteArray())
m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
{
if (_arrayType.location() == DataLocation::Memory)
m_context << _arrayType.getBaseType()->memoryHeadSize();
else
m_context << _arrayType.getBaseType()->getCalldataEncodedSize();
m_context << eth::Instruction::MUL;
}
else if (_pad)
m_context << u256(31) << eth::Instruction::ADD
<< u256(32) << eth::Instruction::DUP1

@@ -632,7 +672,7 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
}
}

void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
void ArrayUtils::accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck) const
{
DataLocation location = _arrayType.location();
eth::Instruction load =

@@ -640,19 +680,22 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
location == DataLocation::Memory ? eth::Instruction::MLOAD :
eth::Instruction::CALLDATALOAD;

// retrieve length
if (!_arrayType.isDynamicallySized())
m_context << _arrayType.getLength();
else if (location == DataLocation::CallData)
// length is stored on the stack
m_context << eth::Instruction::SWAP1;
else
m_context << eth::Instruction::DUP2 << load;
// stack: <base_ref> <index> <length>
// check out-of-bounds access
m_context << eth::Instruction::DUP2 << eth::Instruction::LT << eth::Instruction::ISZERO;
// out-of-bounds access throws exception
m_context.appendConditionalJumpTo(m_context.errorTag());
if (_doBoundsCheck)
{
// retrieve length
if (!_arrayType.isDynamicallySized())
m_context << _arrayType.getLength();
else if (location == DataLocation::CallData)
// length is stored on the stack
m_context << eth::Instruction::SWAP1;
else
m_context << eth::Instruction::DUP2 << load;
// stack: <base_ref> <index> <length>
// check out-of-bounds access
m_context << eth::Instruction::DUP2 << eth::Instruction::LT << eth::Instruction::ISZERO;
// out-of-bounds access throws exception
m_context.appendConditionalJumpTo(m_context.errorTag());
}

// stack: <base_ref> <index>
m_context << eth::Instruction::SWAP1;

@@ -671,18 +714,13 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
if (!_arrayType.isByteArray())
{
m_context << eth::Instruction::SWAP1;
m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
if (location == DataLocation::CallData)
m_context << _arrayType.getBaseType()->getCalldataEncodedSize();
else
m_context << u256(_arrayType.memoryHeadSize());
m_context << eth::Instruction::MUL;
}
m_context << eth::Instruction::ADD;
//@todo we should also load if it is a reference type of dynamic length
// but we should apply special logic if we load from calldata.
if (_arrayType.getBaseType()->isValueType())
CompilerUtils(m_context).loadFromMemoryDynamic(
*_arrayType.getBaseType(),
location == DataLocation::CallData,
!_arrayType.isByteArray(),
false
);
break;
case DataLocation::Storage:
m_context << eth::Instruction::SWAP1;
ArrayUtils.h (15 changed lines)
@@ -44,7 +44,11 @@ public:
/// Stack pre: source_reference [source_byte_offset/source_length] target_reference target_byte_offset
/// Stack post: target_reference target_byte_offset
void copyArrayToStorage(ArrayType const& _targetType, ArrayType const& _sourceType) const;
/// Copies an array (which cannot be dynamically nested) from anywhere to memory.
/// Copies the data part of an array (which cannot be dynamically nested) from anywhere
/// to a given position in memory.
/// This always copies contained data as is (i.e. structs and fixed-size arrays are copied in
/// place as required by the ABI encoding). Use CompilerUtils::convertType if you want real
/// memory copies of nested arrays.
/// Stack pre: memory_offset source_item
/// Stack post: memory_offset + length(padded)
void copyArrayToMemory(ArrayType const& _sourceType, bool _padToWordBoundaries = true) const;

@@ -74,12 +78,11 @@ public:
/// Stack pre: reference (excludes byte offset for dynamic storage arrays)
/// Stack post: reference length
void retrieveLength(ArrayType const& _arrayType) const;
/// Retrieves the value at a specific index. If the location is storage, only retrieves the
/// position.
/// Performs bounds checking and returns a reference on the stack.
/// Stack pre: reference [length] index
/// Stack post for storage: slot byte_offset
/// Stack post for calldata: value
void accessIndex(ArrayType const& _arrayType) const;
/// Stack post (storage): storage_slot byte_offset
/// Stack post: memory/calldata_offset
void accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck = true) const;

private:
/// Adds the given number of bytes to a storage byte offset counter and also increments
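Editorial note on the new _doBoundsCheck parameter declared above: with the default value of true, accessIndex loads (or recalls) the array length and jumps to the error tag on an out-of-bounds index; the element-wise copy loops added elsewhere in this commit pass false, presumably because their loop counter is already compared against the length before each access. The sketch below restates the bounds-check block from the ArrayUtils.cpp hunk above; the comments are editorial annotations.

if (_doBoundsCheck)
{
	// fetch the length: a compile-time constant for static arrays, already on
	// the stack for calldata, otherwise loaded through the reference
	if (!_arrayType.isDynamicallySized())
		m_context << _arrayType.getLength();
	else if (location == DataLocation::CallData)
		m_context << eth::Instruction::SWAP1;
	else
		m_context << eth::Instruction::DUP2 << load;
	// stack: <base_ref> <index> <length>; jump to the error tag unless index < length
	m_context << eth::Instruction::DUP2 << eth::Instruction::LT << eth::Instruction::ISZERO;
	m_context.appendConditionalJumpTo(m_context.errorTag());
}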
@@ -657,7 +657,7 @@ void Compiler::appendStackVariableInitialisation(VariableDeclaration const& _var
{
CompilerContext::LocationSetter location(m_context, _variable);
m_context.addVariable(_variable);
ExpressionCompiler(m_context).appendStackVariableInitialisation(*_variable.getType());
CompilerUtils(m_context).pushZeroValue(*_variable.getType());
}

void Compiler::compileExpression(Expression const& _expression, TypePointer const& _targetType)
@@ -54,6 +54,13 @@ void CompilerUtils::storeFreeMemoryPointer()
m_context << u256(freeMemoryPointer) << eth::Instruction::MSTORE;
}

void CompilerUtils::allocateMemory()
{
fetchFreeMemoryPointer();
m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
storeFreeMemoryPointer();
}

void CompilerUtils::toSizeAfterFreeMemoryPointer()
{
fetchFreeMemoryPointer();
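Editorial annotation of the new allocateMemory helper above. The stack comments are inferred from the documented pre/post conditions in CompilerUtils.h (<size> before, <mem_start> after) and are not part of the committed source.

void CompilerUtils::allocateMemory()
{
	fetchFreeMemoryPointer();              // stack: <size> <mem_start>
	m_context << eth::Instruction::SWAP1   // stack: <mem_start> <size>
	          << eth::Instruction::DUP2    // stack: <mem_start> <size> <mem_start>
	          << eth::Instruction::ADD;    // stack: <mem_start> <new_free_ptr>
	storeFreeMemoryPointer();              // stack: <mem_start>
}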
@@ -101,17 +108,20 @@ void CompilerUtils::storeInMemory(unsigned _offset)

void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries)
{
if (_type.getCategory() == Type::Category::Array)
ArrayUtils(m_context).copyArrayToMemory(
dynamic_cast<ArrayType const&>(_type),
_padToWordBoundaries
);
if (auto ref = dynamic_cast<ReferenceType const*>(&_type))
{
solAssert(ref->location() == DataLocation::Memory, "");
storeInMemoryDynamic(IntegerType(256), _padToWordBoundaries);
}
else
{
unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries);
if (numBytes > 0)
{
solAssert(_type.getSizeOnStack() == 1, "Memory store of types with stack size != 1 not implemented.");
solAssert(
_type.getSizeOnStack() == 1,
"Memory store of types with stack size != 1 not implemented."
);
m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
m_context << u256(numBytes) << eth::Instruction::ADD;
}

@@ -164,7 +174,10 @@ void CompilerUtils::encodeToMemory(
type = _givenTypes[i]; // delay conversion
else
convertType(*_givenTypes[i], *targetType, true);
storeInMemoryDynamic(*type, _padToWordBoundaries);
if (auto arrayType = dynamic_cast<ArrayType const*>(type.get()))
ArrayUtils(m_context).copyArrayToMemory(*arrayType, _padToWordBoundaries);
else
storeInMemoryDynamic(*type, _padToWordBoundaries);
}
stackPos += _givenTypes[i]->getSizeOnStack();
}

@@ -207,7 +220,7 @@ void CompilerUtils::encodeToMemory(
m_context << eth::swapInstruction(arrayType.getSizeOnStack() + 1) << eth::Instruction::POP;
// stack: ... <end_of_mem''> <value...>
// copy data part
storeInMemoryDynamic(arrayType, true);
ArrayUtils(m_context).copyArrayToMemory(arrayType, _padToWordBoundaries);
// stack: ... <end_of_mem'''>

thisDynPointer++;
@@ -349,63 +362,67 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
{
// stack: <source ref> (variably sized)
unsigned stackSize = typeOnStack.getSizeOnStack();
fetchFreeMemoryPointer();
moveIntoStack(stackSize);
// stack: <mem start> <source ref> (variably sized)
bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
if (fromStorage)
{
stackSize--;
// remove storage offset, as requested by ArrayUtils::retrieveLength
m_context << eth::Instruction::POP;
}
ArrayUtils(m_context).retrieveLength(typeOnStack);

// allocate memory
// stack: <source ref> (variably sized) <length>
m_context << eth::Instruction::DUP1;
ArrayUtils(m_context).convertLengthToSize(targetType, true);
// stack: <source ref> (variably sized) <length> <size>
if (targetType.isDynamicallySized())
m_context << u256(0x20) << eth::Instruction::ADD;
allocateMemory();
// stack: <source ref> (variably sized) <length> <mem start>
m_context << eth::Instruction::DUP1;
moveIntoStack(2 + stackSize);
if (targetType.isDynamicallySized())
{
bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
// store length
if (fromStorage)
{
stackSize--;
// remove storage offset, as requested by ArrayUtils::retrieveLength
m_context << eth::Instruction::POP;
}
ArrayUtils(m_context).retrieveLength(typeOnStack);
// Stack: <mem start> <source ref> <length>
m_context << eth::dupInstruction(2 + stackSize) << eth::Instruction::MSTORE;
m_context << eth::dupInstruction(1 + stackSize) << u256(0x20);
m_context << eth::Instruction::ADD;
moveIntoStack(stackSize);
if (fromStorage)
{
m_context << u256(0);
stackSize++;
}
}
else
{
m_context << eth::dupInstruction(1 + stackSize);
moveIntoStack(stackSize);
}
// Stack: <mem start> <mem data start> <value>
// Store data part.
storeInMemoryDynamic(typeOnStack);
// Stack <mem start> <mem end>
storeFreeMemoryPointer();
}
else if (typeOnStack.location() == DataLocation::CallData)
{
// Stack: <offset> [<length>]
// length is present if dynamically sized
fetchFreeMemoryPointer();
moveIntoStack(typeOnStack.getSizeOnStack());
// stack: memptr calldataoffset [<length>]
if (typeOnStack.isDynamicallySized())
{
solAssert(targetType.isDynamicallySized(), "");
m_context << eth::Instruction::DUP3 << eth::Instruction::DUP2;
m_context << eth::Instruction::DUP2;
storeInMemoryDynamic(IntegerType(256));
moveIntoStack(typeOnStack.getSizeOnStack());
}
// stack: <mem start> <source ref> (variably sized) <length> <mem data pos>
if (targetType.getBaseType()->isValueType())
{
copyToStackTop(2 + stackSize, stackSize);
if (fromStorage)
m_context << u256(0); // add byte offset again
ArrayUtils(m_context).copyArrayToMemory(typeOnStack);
}
else
m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
// stack: mem_ptr mem_data_ptr calldataoffset [<length>]
storeInMemoryDynamic(typeOnStack);
storeFreeMemoryPointer();
{
m_context << u256(0) << eth::Instruction::SWAP1;
// stack: <mem start> <source ref> (variably sized) <length> <counter> <mem data pos>
auto repeat = m_context.newTag();
m_context << repeat;
m_context << eth::Instruction::DUP3 << eth::Instruction::DUP3;
m_context << eth::Instruction::LT << eth::Instruction::ISZERO;
auto loopEnd = m_context.appendConditionalJump();
copyToStackTop(3 + stackSize, stackSize);
copyToStackTop(2 + stackSize, 1);
ArrayUtils(m_context).accessIndex(typeOnStack);
MemoryItem(m_context, *typeOnStack.getBaseType(), true).retrieveValue(
SourceLocation(),
true
);
convertType(*typeOnStack.getBaseType(), *targetType.getBaseType(), _cleanupNeeded);
storeInMemoryDynamic(*targetType.getBaseType(), true);
m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
m_context << eth::Instruction::SWAP1;
m_context.appendJumpTo(repeat);
m_context << loopEnd;
m_context << eth::Instruction::POP;
}
// stack: <mem start> <source ref> (variably sized) <length> <mem data pos updated>
popStackSlots(2 + stackSize);
// Stack: <mem start>
}
// nothing to do for memory to memory
break;
}
default:
@@ -444,6 +461,57 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
}
}

void CompilerUtils::pushZeroValue(const Type& _type)
{
auto const* referenceType = dynamic_cast<ReferenceType const*>(&_type);
if (!referenceType || referenceType->location() == DataLocation::Storage)
{
for (size_t i = 0; i < _type.getSizeOnStack(); ++i)
m_context << u256(0);
return;
}
solAssert(referenceType->location() == DataLocation::Memory, "");

m_context << u256(max(32u, _type.getCalldataEncodedSize()));
allocateMemory();
m_context << eth::Instruction::DUP1;

if (auto structType = dynamic_cast<StructType const*>(&_type))
for (auto const& member: structType->getMembers())
{
pushZeroValue(*member.type);
storeInMemoryDynamic(*member.type);
}
else if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
{
if (arrayType->isDynamicallySized())
{
// zero length
m_context << u256(0);
storeInMemoryDynamic(IntegerType(256));
}
else if (arrayType->getLength() > 0)
{
m_context << arrayType->getLength() << eth::Instruction::SWAP1;
// stack: items_to_do memory_pos
auto repeat = m_context.newTag();
m_context << repeat;
pushZeroValue(*arrayType->getBaseType());
storeInMemoryDynamic(*arrayType->getBaseType());
m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::SWAP1;
m_context << eth::Instruction::SUB << eth::Instruction::SWAP1;
m_context << eth::Instruction::DUP2;
m_context.appendConditionalJumpTo(repeat);
m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
}
}
else
solAssert(false, "Requested initialisation for unknown type: " + _type.toString());

// remove the updated memory pointer
m_context << eth::Instruction::POP;
}

void CompilerUtils::moveToStackVariable(VariableDeclaration const& _variable)
{
unsigned const stackPosition = m_context.baseToCurrentStackOffset(m_context.getBaseStackOffsetOfVariable(_variable));
@@ -41,6 +41,10 @@ public:
void fetchFreeMemoryPointer();
/// Stores the free memory pointer from the stack.
void storeFreeMemoryPointer();
/// Allocates a number of bytes in memory as given on the stack.
/// Stack pre: <size>
/// Stack post: <mem_start>
void allocateMemory();
/// Appends code that transforms memptr to (memptr - free_memptr) memptr
void toSizeAfterFreeMemoryPointer();

@@ -70,7 +74,8 @@ public:
/// @param _type type of the data on the stack
void storeInMemory(unsigned _offset);
/// Dynamic version of @see storeInMemory, expects the memory offset below the value on the stack
/// and also updates that. For arrays, only copies the data part.
/// and also updates that. For reference types, only copies the data pointer. Fails for
/// non-memory-references.
/// @param _padToWordBoundaries if true, adds zeros to pad to multiple of 32 bytes. Array elements
/// are always padded (except for byte arrays), regardless of this parameter.
/// Stack pre: memory_offset value...

@@ -107,6 +112,10 @@ public:
/// necessary.
void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false);

/// Creates a zero-value for the given type and puts it onto the stack. This might allocate
/// memory for memory references.
void pushZeroValue(Type const& _type);

/// Moves the value that is at the top of the stack to a stack variable.
void moveToStackVariable(VariableDeclaration const& _variable);
/// Copies an item that occupies @a _itemSize stack slots from a stack depth of @a _stackDepth
@@ -56,62 +56,6 @@ void ExpressionCompiler::appendStateVariableInitialization(VariableDeclaration c
StorageItem(m_context, _varDecl).storeValue(*_varDecl.getType(), _varDecl.getLocation(), true);
}

void ExpressionCompiler::appendStackVariableInitialisation(Type const& _type, bool _toMemory)
{
CompilerUtils utils(m_context);
auto const* referenceType = dynamic_cast<ReferenceType const*>(&_type);
if (!referenceType || referenceType->location() == DataLocation::Storage)
{
for (size_t i = 0; i < _type.getSizeOnStack(); ++i)
m_context << u256(0);
if (_toMemory)
utils.storeInMemoryDynamic(_type);
return;
}
solAssert(referenceType->location() == DataLocation::Memory, "");
if (!_toMemory)
{
// allocate memory
utils.fetchFreeMemoryPointer();
m_context << eth::Instruction::DUP1 << u256(max(32u, _type.getCalldataEncodedSize()));
m_context << eth::Instruction::ADD;
utils.storeFreeMemoryPointer();
m_context << eth::Instruction::DUP1;
}

if (auto structType = dynamic_cast<StructType const*>(&_type))
for (auto const& member: structType->getMembers())
appendStackVariableInitialisation(*member.type, true);
else if (auto arrayType = dynamic_cast<ArrayType const*>(&_type))
{
if (arrayType->isDynamicallySized())
{
// zero length
m_context << u256(0);
CompilerUtils(m_context).storeInMemoryDynamic(IntegerType(256));
}
else if (arrayType->getLength() > 0)
{
m_context << arrayType->getLength() << eth::Instruction::SWAP1;
// stack: items_to_do memory_pos
auto repeat = m_context.newTag();
m_context << repeat;
appendStackVariableInitialisation(*arrayType->getBaseType(), true);
m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::SWAP1;
m_context << eth::Instruction::SUB << eth::Instruction::SWAP1;
m_context << eth::Instruction::DUP2;
m_context.appendConditionalJumpTo(repeat);
m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
}
}
else
solAssert(false, "Requested initialisation for unknown type: " + _type.toString());

if (!_toMemory)
// remove the updated memory pointer
m_context << eth::Instruction::POP;
}

void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const& _varDecl)
{
CompilerContext::LocationSetter locationSetter(m_context, _varDecl);

@@ -211,6 +155,8 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
TypePointer type = _assignment.getRightHandSide().getType();
if (!_assignment.getType()->dataStoredIn(DataLocation::Storage))
{
//@todo we should delay conversion here if RHS is not in memory, LHS is a MemoryItem
// and not dynamically-sized.
utils().convertType(*type, *_assignment.getType());
type = _assignment.getType();
}

@@ -827,8 +773,9 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
_indexAccess.getIndexExpression()->accept(*this);
// stack layout: <base_ref> [<length>] <index>
ArrayUtils(m_context).accessIndex(arrayType);
if (arrayType.location() == DataLocation::Storage)
switch (arrayType.location())
{
case DataLocation::Storage:
if (arrayType.isByteArray())
{
solAssert(!arrayType.isString(), "Index access to string is not allowed.");

@@ -836,6 +783,21 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
}
else
setLValueToStorageItem(_indexAccess);
break;
case DataLocation::Memory:
setLValue<MemoryItem>(_indexAccess, *_indexAccess.getType(), !arrayType.isByteArray());
break;
case DataLocation::CallData:
//@todo if we implement this, the value in calldata has to be added to the base offset
solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
if (arrayType.getBaseType()->isValueType())
CompilerUtils(m_context).loadFromMemoryDynamic(
*arrayType.getBaseType(),
true,
!arrayType.isByteArray(),
false
);
break;
}
}
else
@@ -64,13 +64,6 @@ public:
/// Appends code to set a state variable to its initial value/expression.
void appendStateVariableInitialization(VariableDeclaration const& _varDecl);

/// Appends code to initialise a local variable.
/// If @a _toMemory is false, leaves the value on the stack. For memory references, this
/// allocates new memory.
/// If @a _toMemory is true, directly stores the data in the memory pos on the stack and
/// updates it.
void appendStackVariableInitialisation(Type const& _type, bool _toMemory = false);

/// Appends code for a State Variable accessor function
void appendStateVariableAccessor(VariableDeclaration const& _varDecl);
LValue.cpp (56 changed lines)
@@ -82,6 +82,62 @@ void StackVariable::setToZero(SourceLocation const& _location, bool) const
<< eth::Instruction::POP;
}

MemoryItem::MemoryItem(CompilerContext& _compilerContext, Type const& _type, bool _padded):
LValue(_compilerContext, _type),
m_padded(_padded)
{
}

void MemoryItem::retrieveValue(SourceLocation const&, bool _remove) const
{
if (m_dataType.isValueType())
{
if (!_remove)
m_context << eth::Instruction::DUP1;
CompilerUtils(m_context).loadFromMemoryDynamic(m_dataType, false, m_padded, false);
}
else
m_context << eth::Instruction::MLOAD;
}

void MemoryItem::storeValue(Type const& _sourceType, SourceLocation const&, bool _move) const
{
CompilerUtils utils(m_context);
if (m_dataType.isValueType())
{
solAssert(_sourceType.isValueType(), "");
utils.moveIntoStack(_sourceType.getSizeOnStack());
utils.convertType(_sourceType, m_dataType, true);
if (!_move)
{
utils.moveToStackTop(m_dataType.getSizeOnStack());
utils.copyToStackTop(2, m_dataType.getSizeOnStack());
}
utils.storeInMemoryDynamic(m_dataType, m_padded);
m_context << eth::Instruction::POP;
}
else
{
solAssert(_sourceType == m_dataType, "Conversion not implemented for assignment to memory.");

solAssert(m_dataType.getSizeOnStack() == 1, "");
if (!_move)
m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
// stack: [value] value lvalue
// only store the reference
m_context << eth::Instruction::MSTORE;
}
}

void MemoryItem::setToZero(SourceLocation const&, bool _removeReference) const
{
CompilerUtils utils(m_context);
if (!_removeReference)
m_context << eth::Instruction::DUP1;
utils.pushZeroValue(m_dataType);
utils.storeInMemoryDynamic(m_dataType, m_padded);
m_context << eth::Instruction::POP;
}

StorageItem::StorageItem(CompilerContext& _compilerContext, Declaration const& _declaration):
StorageItem(_compilerContext, *_declaration.getType())
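Editorial note on the new MemoryItem lvalue: value types go through loadFromMemoryDynamic, with m_padded distinguishing packed byte-array elements from word-aligned elements, while reference types are read as a single MLOAD of the data pointer, matching the commit's rule that reference data is not copied in place. The restatement below is taken from the hunk above; the comments are editorial annotations.

void MemoryItem::retrieveValue(SourceLocation const&, bool _remove) const
{
	if (m_dataType.isValueType())
	{
		if (!_remove)
			m_context << eth::Instruction::DUP1;   // keep the reference if requested
		CompilerUtils(m_context).loadFromMemoryDynamic(m_dataType, false, m_padded, false);
	}
	else
		// reference types: only the 32-byte data pointer is loaded, never the data
		m_context << eth::Instruction::MLOAD;
}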
LValue.h (23 changed lines)
@@ -97,6 +97,29 @@ private:
unsigned m_size;
};

/**
* Reference to some item in memory.
*/
class MemoryItem: public LValue
{
public:
MemoryItem(CompilerContext& _compilerContext, Type const& _type, bool _padded);
virtual unsigned sizeOnStack() const override { return 1; }
virtual void retrieveValue(SourceLocation const& _location, bool _remove = false) const override;
virtual void storeValue(
Type const& _sourceType,
SourceLocation const& _location = SourceLocation(),
bool _move = false
) const override;
virtual void setToZero(
SourceLocation const& _location = SourceLocation(),
bool _removeReference = true
) const override;
private:
/// Special flag to deal with byte array elements.
bool m_padded = false;
};

/**
* Reference to some item in storage. On the stack this is <storage key> <offset_inside_value>,
* where 0 <= offset_inside_value < 32 and an offset of i means that the value is multiplied
Types.h (5 changed lines)
@@ -179,6 +179,9 @@ public:
/// is not a simple big-endian encoding or the type cannot be stored in calldata.
/// If @a _padded then it is assumed that each element is padded to a multiple of 32 bytes.
virtual unsigned getCalldataEncodedSize(bool _padded) const { (void)_padded; return 0; }
/// @returns the size of this data type in bytes when stored in memory. For memory-reference
/// types, this is the size of the memory pointer.
virtual unsigned memoryHeadSize() const { return getCalldataEncodedSize(); }
/// Convenience version of @see getCalldataEncodedSize(bool)
unsigned getCalldataEncodedSize() const { return getCalldataEncodedSize(true); }
/// @returns true if the type is dynamically encoded in calldata

@@ -373,6 +376,8 @@ public:
explicit ReferenceType(DataLocation _location): m_location(_location) {}
DataLocation location() const { return m_location; }

virtual unsigned memoryHeadSize() const override { return 32; }

/// @returns a copy of this type with location (recursively) changed to @a _location,
/// whereas isPointer is only shallowly changed - the deep copy is always a bound reference.
virtual TypePointer copyForLocation(DataLocation _location, bool _isPointer) const = 0;