Mirror of https://github.com/ethereum/solidity, synced 2023-10-03 13:03:40 +00:00
Some fixes for calldata arrays.
commit 109b4eafb9 (parent 37e7f1f10d)
@@ -39,11 +39,6 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
// stack layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top)
solAssert(_targetType.location() == DataLocation::Storage, "");
if (_sourceType.location() == DataLocation::Memory)
    solAssert(
        _sourceType.getBaseType()->isValueType(),
        "Copying arrays of non-value-types to storage not yet implemented."
    );

IntegerType uint256(256);
Type const* targetBaseType = _targetType.isByteArray() ? &uint256 : &(*_targetType.getBaseType());
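The stack layout documented in the hunk above is shuffled throughout this function with DUPn and SWAPn (emitted via eth::dupInstruction / eth::swapInstruction). As a reading aid only — this is a toy model, not compiler code, and Word is just a stand-in for the compiler's 256-bit u256 — here is a minimal sketch of what those two EVM operations do:

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using Word = std::uint64_t; // illustrative stand-in for u256

// DUPn: copy the n-th stack item (1 = top) onto the top of the stack.
void dup(std::vector<Word>& stack, std::size_t n)
{
    assert(n >= 1 && n <= stack.size());
    stack.push_back(stack[stack.size() - n]);
}

// SWAPn: exchange the top item with the item n positions below it.
void swap(std::vector<Word>& stack, std::size_t n)
{
    assert(n >= 1 && n < stack.size());
    std::swap(stack.back(), stack[stack.size() - 1 - n]);
}

int main()
{
    // the documented layout: [source_ref] [source_byte_off] [source length] target_ref target_byte_off (top)
    std::vector<Word> stack = {0xA0, 0, 3, 0x01, 0};
    dup(stack, 3);              // DUP3: with this layout, copies the source length to the top
    assert(stack.back() == 3);
}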
@@ -139,14 +134,14 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
if (sourceBaseType->getCategory() == Type::Category::Array)
{
    solAssert(byteOffsetSize == 0, "Byte offset for array as base type.");
    auto const& sourceBaseArrayType = dynamic_cast<ArrayType const&>(*sourceBaseType);
    m_context << eth::Instruction::DUP3;
    if (sourceIsStorage)
        m_context << u256(0);
    else if (sourceBaseArrayType.location() == DataLocation::Memory)
        m_context << eth::Instruction::MLOAD;
    m_context << eth::dupInstruction(sourceIsStorage ? 4 : 3) << u256(0);
    copyArrayToStorage(
        dynamic_cast<ArrayType const&>(*targetBaseType),
        dynamic_cast<ArrayType const&>(*sourceBaseType)
    );
    copyArrayToStorage(dynamic_cast<ArrayType const&>(*targetBaseType), sourceBaseArrayType);
    m_context << eth::Instruction::POP << eth::Instruction::POP;
}
else if (directCopy)
@@ -193,11 +188,18 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
if (haveByteOffsetSource)
    incrementByteOffset(sourceBaseType->getStorageBytes(), 1, haveByteOffsetTarget ? 5 : 4);
else
{
    m_context << eth::swapInstruction(2 + byteOffsetSize);
    if (sourceIsStorage)
        m_context << sourceBaseType->getStorageSize();
    else if (_sourceType.location() == DataLocation::Memory)
        m_context << sourceBaseType->memoryHeadSize();
    else
        m_context << sourceBaseType->getCalldataEncodedSize(true);
    m_context
        << eth::swapInstruction(2 + byteOffsetSize)
        << (sourceIsStorage ? sourceBaseType->getStorageSize() : sourceBaseType->getCalldataEncodedSize())
        << eth::Instruction::ADD
        << eth::swapInstruction(2 + byteOffsetSize);
}
// increment target
if (haveByteOffsetTarget)
    incrementByteOffset(targetBaseType->getStorageBytes(), byteOffsetSize, byteOffsetSize + 2);
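The new branch above picks the per-element stride for advancing the source pointer from the source's data location: whole storage slots for storage, the memory head size for memory, and the padded calldata encoding size for calldata. A standalone sketch of that decision — the struct fields are made-up stand-ins for the per-type size queries, purely for illustration:

#include <cstdint>

enum class DataLocation { Storage, Memory, CallData };

// Illustrative stand-ins for getStorageSize(), memoryHeadSize(), getCalldataEncodedSize(true).
struct BaseTypeSizes
{
    std::uint64_t storageSize;          // size in storage slots
    std::uint64_t memoryHeadSize;       // head size of the memory encoding
    std::uint64_t calldataEncodedSize;  // padded calldata encoding size
};

// Stride by which the source pointer advances per element while copying.
std::uint64_t sourceStride(DataLocation location, BaseTypeSizes const& sizes)
{
    switch (location)
    {
    case DataLocation::Storage:  return sizes.storageSize;
    case DataLocation::Memory:   return sizes.memoryHeadSize;
    case DataLocation::CallData: return sizes.calldataEncodedSize;
    }
    return 0;
}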
@@ -696,6 +698,9 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck) c
    // out-of-bounds access throws exception
    m_context.appendConditionalJumpTo(m_context.errorTag());
}
else if (location == DataLocation::CallData && _arrayType.isDynamicallySized())
    // remove length if present
    m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;

// stack: <base_ref> <index>
m_context << eth::Instruction::SWAP1;
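For a dynamically sized calldata array the stack at this point presumably holds <base_ref> <length> <index> (top), so the SWAP1/POP pair added above discards the length and leaves the <base_ref> <index> layout the rest of the routine expects. Continuing the toy stack model sketched earlier (illustration only, not compiler code):

std::vector<Word> stack = {/*base_ref*/ 0x04, /*length*/ 3, /*index*/ 1};
swap(stack, 1);    // SWAP1: <base_ref> <index> <length>
stack.pop_back();  // POP:   <base_ref> <index>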
@@ -261,7 +261,7 @@ void Compiler::appendCalldataUnpacker(
{
    // We do not check the calldata size, everything is zero-padded

    //@todo this does not yet support nested arrays
    //@todo this does not yet support nested dynamic arrays

    if (_startOffset == u256(-1))
        _startOffset = u256(CompilerUtils::dataStartOffset);
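When no explicit start offset is supplied, decoding starts at CompilerUtils::dataStartOffset, i.e. right after the function selector at the beginning of the call data. A one-line illustration; treat the concrete value as an assumption rather than a quote from the codebase:

// Call data layout: | 4-byte function selector | ABI-encoded arguments ... |
constexpr unsigned selectorSize = 4;
constexpr unsigned dataStartOffset = selectorSize; // presumed value of CompilerUtils::dataStartOffset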
@@ -279,6 +279,12 @@ void Compiler::appendCalldataUnpacker(
solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
if (_fromMemory)
{
    solAssert(
        arrayType.getBaseType()->isValueType(),
        "Nested memory arrays not yet implemented here."
    );
    // @todo If base type is an array or struct, it is still calldata-style encoded, so
    // we would have to convert it like below.
    solAssert(arrayType.location() == DataLocation::Memory, "");
    // compute data pointer
    m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
@@ -311,6 +317,7 @@ void Compiler::appendCalldataUnpacker(
}
if (arrayType.location() == DataLocation::Memory)
{
    // stack: calldata_ref [length] next_calldata
    // copy to memory
    // move calldata type up again
    CompilerUtils(m_context).moveIntoStack(calldataType->getSizeOnStack());
@@ -390,6 +390,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
// stack: <mem start> <source ref> (variably sized) <length> <mem data pos>
if (targetType.getBaseType()->isValueType())
{
    solAssert(typeOnStack.getBaseType()->isValueType(), "");
    copyToStackTop(2 + stackSize, stackSize);
    if (fromStorage)
        m_context << u256(0); // add byte offset again
@@ -406,11 +407,7 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
auto loopEnd = m_context.appendConditionalJump();
copyToStackTop(3 + stackSize, stackSize);
copyToStackTop(2 + stackSize, 1);
ArrayUtils(m_context).accessIndex(typeOnStack);
MemoryItem(m_context, *typeOnStack.getBaseType(), true).retrieveValue(
    SourceLocation(),
    true
);
ArrayUtils(m_context).accessIndex(typeOnStack, false);
convertType(*typeOnStack.getBaseType(), *targetType.getBaseType(), _cleanupNeeded);
storeInMemoryDynamic(*targetType.getBaseType(), true);
m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
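The surrounding code emits an element-by-element copy loop: fetch one element of the source array, convert it to the target base type, write it into the growing memory copy, and bump the index. A purely illustrative C++ analogue of that loop shape — all names here are made up and only mirror the roles of accessIndex, convertType and storeInMemoryDynamic:

#include <vector>

// convert(element) stands in for converting a single element to the target base type;
// the push_back stands in for storeInMemoryDynamic advancing the write position.
template <typename Source, typename Target, typename Convert>
std::vector<Target> copyAndConvert(std::vector<Source> const& source, Convert convert)
{
    std::vector<Target> target;
    target.reserve(source.size());
    for (Source const& element: source)
        target.push_back(convert(element));
    return target;
}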