/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* @author Christian <c@ethdev.com>
* @date 2014
* Solidity AST to EVM bytecode compiler for expressions.
*/
#include <libsolidity/codegen/ExpressionCompiler.h>
#include <libsolidity/codegen/ReturnInfo.h>
#include <libsolidity/codegen/CompilerContext.h>
#include <libsolidity/codegen/CompilerUtils.h>
#include <libsolidity/codegen/LValue.h>
#include <libsolidity/codegen/ArrayUtils.h>
#include <libsolidity/ast/AST.h>
#include <libsolidity/ast/TypeProvider.h>
#include <libevmasm/GasMeter.h>
#include <libsolutil/Common.h>
#include <libsolutil/FunctionSelector.h>
#include <libsolutil/Keccak256.h>
#include <libsolutil/Whiskers.h>
#include <boost/algorithm/string/replace.hpp>
#include <numeric>
#include <utility>
using namespace std;
using namespace solidity;
using namespace solidity::evmasm;
using namespace solidity::frontend;
using namespace solidity::langutil;
using namespace solidity::util;
namespace
{
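// Chooses an intermediate type to convert a value to before it is finally stored or used:
// for shift operations only the mobile type of the value itself matters, tuples are handled
// component-wise, and values that will end up in storage are only brought to their mobile
// type here so that the final conversion happens in storeValue.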
Type const* closestType(Type const* _type, Type const* _targetType, bool _isShiftOp)
{
if (_isShiftOp)
return _type->mobileType();
else if (auto const* tupleType = dynamic_cast<TupleType const*>(_type))
{
solAssert(_targetType, "");
TypePointers const& targetComponents = dynamic_cast<TupleType const&>(*_targetType).components();
solAssert(tupleType->components().size() == targetComponents.size(), "");
TypePointers tempComponents(targetComponents.size());
for (size_t i = 0; i < targetComponents.size(); ++i)
{
if (tupleType->components()[i] && targetComponents[i])
{
tempComponents[i] = closestType(tupleType->components()[i], targetComponents[i], _isShiftOp);
solAssert(tempComponents[i], "");
}
}
return TypeProvider::tuple(move(tempComponents));
}
else
return _targetType->dataStoredIn(DataLocation::Storage) ? _type->mobileType() : _targetType;
}
}
void ExpressionCompiler::compile(Expression const& _expression)
{
_expression.accept(*this);
}
void ExpressionCompiler::appendStateVariableInitialization(VariableDeclaration const& _varDecl)
{
if (!_varDecl.value())
return;
Type const* type = _varDecl.value()->annotation().type;
solAssert(!!type, "Type information not available.");
CompilerContext::LocationSetter locationSetter(m_context, _varDecl);
_varDecl.value()->accept(*this);
if (_varDecl.annotation().type->dataStoredIn(DataLocation::Storage))
{
// reference type, only convert value to mobile type and do final conversion in storeValue.
auto mt = type->mobileType();
solAssert(mt, "");
utils().convertType(*type, *mt);
type = mt;
}
else
{
utils().convertType(*type, *_varDecl.annotation().type);
type = _varDecl.annotation().type;
}
if (_varDecl.immutable())
ImmutableItem(m_context, _varDecl).storeValue(*type, _varDecl.location(), true);
else
StorageItem(m_context, _varDecl).storeValue(*type, _varDecl.location(), true);
}
void ExpressionCompiler::appendConstStateVariableAccessor(VariableDeclaration const& _varDecl)
{
solAssert(_varDecl.isConstant(), "");
acceptAndConvert(*_varDecl.value(), *_varDecl.annotation().type);
// append return
m_context << dupInstruction(_varDecl.annotation().type->sizeOnStack() + 1);
m_context.appendJump(evmasm::AssemblyItem::JumpType::OutOfFunction);
}
void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const& _varDecl)
{
solAssert(!_varDecl.isConstant(), "");
CompilerContext::LocationSetter locationSetter(m_context, _varDecl);
FunctionType accessorType(_varDecl);
TypePointers paramTypes = accessorType.parameterTypes();
if (_varDecl.immutable())
solAssert(paramTypes.empty(), "");
m_context.adjustStackOffset(static_cast<int>(1 + CompilerUtils::sizeOnStack(paramTypes)));
if (!_varDecl.immutable())
{
// retrieve the position of the variable
auto const& location = m_context.storageLocationOfVariable(_varDecl);
m_context << location.first << u256(location.second);
}
Type const* returnType = _varDecl.annotation().type;
for (size_t i = 0; i < paramTypes.size(); ++i)
{
if (auto mappingType = dynamic_cast<MappingType const*>(returnType))
{
solAssert(CompilerUtils::freeMemoryPointer >= 0x40, "");
// pop offset
m_context << Instruction::POP;
if (paramTypes[i]->isDynamicallySized())
{
solAssert(
dynamic_cast<ArrayType const&>(*paramTypes[i]).isByteArray(),
"Expected string or byte array for mapping key type"
);
// stack: <keys..> <slot position>
// copy key[i] to top.
utils().copyToStackTop(static_cast<unsigned>(paramTypes.size() - i + 1), 1);
m_context.appendInlineAssembly(R"({
let key_len := mload(key_ptr)
// Temp. use the memory after the array data for the slot
// position
let post_data_ptr := add(key_ptr, add(key_len, 0x20))
let orig_data := mload(post_data_ptr)
mstore(post_data_ptr, slot_pos)
let hash := keccak256(add(key_ptr, 0x20), add(key_len, 0x20))
mstore(post_data_ptr, orig_data)
slot_pos := hash
})", {"slot_pos", "key_ptr"});
m_context << Instruction::POP;
}
else
{
solAssert(paramTypes[i]->isValueType(), "Expected value type for mapping key");
// move storage offset to memory.
utils().storeInMemory(32);
// move key to memory.
utils().copyToStackTop(static_cast<unsigned>(paramTypes.size() - i), 1);
utils().storeInMemory(0);
m_context << u256(64) << u256(0);
m_context << Instruction::KECCAK256;
}
// push offset
m_context << u256(0);
returnType = mappingType->valueType();
}
else if (auto arrayType = dynamic_cast<ArrayType const*>(returnType))
{
// pop offset
m_context << Instruction::POP;
utils().copyToStackTop(static_cast<unsigned>(paramTypes.size() - i + 1), 1);
ArrayUtils(m_context).retrieveLength(*arrayType, 1);
// Stack: ref [length] index length
// check out-of-bounds access
m_context << Instruction::DUP2 << Instruction::LT;
auto tag = m_context.appendConditionalJump();
m_context << u256(0) << Instruction::DUP1 << Instruction::REVERT;
m_context << tag;
ArrayUtils(m_context).accessIndex(*arrayType, false);
returnType = arrayType->baseType();
}
else
solAssert(false, "Index access is allowed only for \"mapping\" and \"array\" types.");
}
// remove index arguments.
if (paramTypes.size() == 1)
m_context << Instruction::SWAP2 << Instruction::POP << Instruction::SWAP1;
else if (paramTypes.size() >= 2)
{
m_context << swapInstruction(static_cast<unsigned>(paramTypes.size()));
m_context << Instruction::POP;
m_context << swapInstruction(static_cast<unsigned>(paramTypes.size()));
utils().popStackSlots(paramTypes.size() - 1);
}
unsigned retSizeOnStack = 0;
auto returnTypes = accessorType.returnParameterTypes();
solAssert(returnTypes.size() >= 1, "");
if (StructType const* structType = dynamic_cast<StructType const*>(returnType))
{
solAssert(!_varDecl.immutable(), "");
// remove offset
m_context << Instruction::POP;
auto const& names = accessorType.returnParameterNames();
// struct
for (size_t i = 0; i < names.size(); ++i)
{
if (returnTypes[i]->category() == Type::Category::Mapping)
continue;
if (auto arrayType = dynamic_cast<ArrayType const*>(returnTypes[i]))
if (!arrayType->isByteArray())
continue;
pair<u256, unsigned> const& offsets = structType->storageOffsetsOfMember(names[i]);
m_context << Instruction::DUP1 << u256(offsets.first) << Instruction::ADD << u256(offsets.second);
Type const* memberType = structType->memberType(names[i]);
StorageItem(m_context, *memberType).retrieveValue(SourceLocation(), true);
utils().convertType(*memberType, *returnTypes[i]);
utils().moveToStackTop(returnTypes[i]->sizeOnStack());
retSizeOnStack += returnTypes[i]->sizeOnStack();
}
// remove slot
m_context << Instruction::POP;
}
else
{
// simple value or array
solAssert(returnTypes.size() == 1, "");
if (_varDecl.immutable())
ImmutableItem(m_context, _varDecl).retrieveValue(SourceLocation());
else
StorageItem(m_context, *returnType).retrieveValue(SourceLocation(), true);
utils().convertType(*returnType, *returnTypes.front());
retSizeOnStack = returnTypes.front()->sizeOnStack();
}
solAssert(retSizeOnStack == utils().sizeOnStack(returnTypes), "");
if (retSizeOnStack > 15)
BOOST_THROW_EXCEPTION(
StackTooDeepError() <<
errinfo_sourceLocation(_varDecl.location()) <<
errinfo_comment("Stack too deep.")
);
m_context << dupInstruction(retSizeOnStack + 1);
m_context.appendJump(evmasm::AssemblyItem::JumpType::OutOfFunction);
}
bool ExpressionCompiler::visit(Conditional const& _condition)
{
CompilerContext::LocationSetter locationSetter(m_context, _condition);
_condition.condition().accept(*this);
evmasm::AssemblyItem trueTag = m_context.appendConditionalJump();
acceptAndConvert(_condition.falseExpression(), *_condition.annotation().type);
evmasm::AssemblyItem endTag = m_context.appendJumpToNew();
m_context << trueTag;
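// Both branches leave a value of the conditional's result type on the stack. The stack
// height bookkeeping already accounts for the false branch, so compensate before
// compiling the true branch, which pushes the same number of slots.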
int offset = static_cast<int>(_condition.annotation().type->sizeOnStack());
m_context.adjustStackOffset(-offset);
acceptAndConvert(_condition.trueExpression(), *_condition.annotation().type);
m_context << endTag;
return false;
}
bool ExpressionCompiler::visit(Assignment const& _assignment)
{
CompilerContext::LocationSetter locationSetter(m_context, _assignment);
Token op = _assignment.assignmentOperator();
Token binOp = op == Token::Assign ? op : TokenTraits::AssignmentToBinaryOp(op);
Type const& leftType = *_assignment.leftHandSide().annotation().type;
if (leftType.category() == Type::Category::Tuple)
{
solAssert(*_assignment.annotation().type == TupleType(), "");
solAssert(op == Token::Assign, "");
}
else
solAssert(*_assignment.annotation().type == leftType, "");
bool cleanupNeeded = false;
if (op != Token::Assign)
cleanupNeeded = cleanupNeededForOp(leftType.category(), binOp, m_context.arithmetic());
_assignment.rightHandSide().accept(*this);
// Perform some conversion already. This will convert storage types to memory and literals
// to their actual type, but will not convert e.g. memory to storage.
Type const* rightIntermediateType = closestType(
_assignment.rightHandSide().annotation().type,
_assignment.leftHandSide().annotation().type,
op != Token::Assign && TokenTraits::isShiftOp(binOp)
);
solAssert(rightIntermediateType, "");
utils().convertType(*_assignment.rightHandSide().annotation().type, *rightIntermediateType, cleanupNeeded);
_assignment.leftHandSide().accept(*this);
solAssert(!!m_currentLValue, "LValue not retrieved.");
if (op == Token::Assign)
m_currentLValue->storeValue(*rightIntermediateType, _assignment.location());
else // compound assignment
{
solAssert(binOp != Token::Exp, "Compound exp is not possible.");
solAssert(leftType.isValueType(), "Compound operators only available for value types.");
unsigned lvalueSize = m_currentLValue->sizeOnStack();
unsigned itemSize = _assignment.annotation().type->sizeOnStack();
if (lvalueSize > 0)
{
utils().copyToStackTop(lvalueSize + itemSize, itemSize);
utils().copyToStackTop(itemSize + lvalueSize, lvalueSize);
// value lvalue_ref value lvalue_ref
}
m_currentLValue->retrieveValue(_assignment.location(), true);
utils().convertType(leftType, leftType, cleanupNeeded);
if (TokenTraits::isShiftOp(binOp))
appendShiftOperatorCode(binOp, leftType, *rightIntermediateType);
else
{
solAssert(leftType == *rightIntermediateType, "");
appendOrdinaryBinaryOperatorCode(binOp, leftType);
}
if (lvalueSize > 0)
{
if (itemSize + lvalueSize > 16)
BOOST_THROW_EXCEPTION(
StackTooDeepError() <<
errinfo_sourceLocation(_assignment.location()) <<
errinfo_comment("Stack too deep, try removing local variables.")
);
// value [lvalue_ref] updated_value
for (unsigned i = 0; i < itemSize; ++i)
m_context << swapInstruction(itemSize + lvalueSize) << Instruction::POP;
}
m_currentLValue->storeValue(*_assignment.annotation().type, _assignment.location());
}
m_currentLValue.reset();
return false;
}
bool ExpressionCompiler::visit(TupleExpression const& _tuple)
{
if (_tuple.isInlineArray())
{
ArrayType const& arrayType = dynamic_cast<ArrayType const&>(*_tuple.annotation().type);
solAssert(!arrayType.isDynamicallySized(), "Cannot create dynamically sized inline array.");
utils().allocateMemory(max(u256(32u), arrayType.memoryDataSize()));
m_context << Instruction::DUP1;
for (auto const& component: _tuple.components())
{
acceptAndConvert(*component, *arrayType.baseType(), true);
utils().storeInMemoryDynamic(*arrayType.baseType(), true);
}
m_context << Instruction::POP;
}
else
{
vector<unique_ptr<LValue>> lvalues;
for (auto const& component: _tuple.components())
if (component)
{
component->accept(*this);
if (_tuple.annotation().willBeWrittenTo)
{
solAssert(!!m_currentLValue, "");
lvalues.push_back(move(m_currentLValue));
}
}
else if (_tuple.annotation().willBeWrittenTo)
lvalues.push_back(unique_ptr<LValue>());
if (_tuple.annotation().willBeWrittenTo)
{
if (_tuple.components().size() == 1)
m_currentLValue = move(lvalues[0]);
else
m_currentLValue = make_unique<TupleObject>(m_context, move(lvalues));
}
}
return false;
}
bool ExpressionCompiler::visit(UnaryOperation const& _unaryOperation)
{
CompilerContext::LocationSetter locationSetter(m_context, _unaryOperation);
Type const& type = *_unaryOperation.annotation().type;
if (type.category() == Type::Category::RationalNumber)
{
m_context << type.literalValue(nullptr);
return false;
}
_unaryOperation.subExpression().accept(*this);
switch (_unaryOperation.getOperator())
{
case Token::Not: // !
m_context << Instruction::ISZERO;
break;
case Token::BitNot: // ~
m_context << Instruction::NOT;
break;
case Token::Delete: // delete
solAssert(!!m_currentLValue, "LValue not retrieved.");
m_currentLValue->setToZero(_unaryOperation.location());
m_currentLValue.reset();
break;
case Token::Inc: // ++ (pre- or postfix)
case Token::Dec: // -- (pre- or postfix)
solAssert(!!m_currentLValue, "LValue not retrieved.");
solUnimplementedAssert(
type.category() != Type::Category::FixedPoint,
"Not yet implemented - FixedPointType."
);
m_currentLValue->retrieveValue(_unaryOperation.location());
if (!_unaryOperation.isPrefixOperation())
{
// store value for later
solUnimplementedAssert(type.sizeOnStack() == 1, "Stack size != 1 not implemented.");
m_context << Instruction::DUP1;
if (m_currentLValue->sizeOnStack() > 0)
for (unsigned i = 1 + m_currentLValue->sizeOnStack(); i > 0; --i)
m_context << swapInstruction(i);
}
if (_unaryOperation.getOperator() == Token::Inc)
{
if (m_context.arithmetic() == Arithmetic::Checked)
m_context.callYulFunction(m_context.utilFunctions().incrementCheckedFunction(type), 1, 1);
else
{
m_context << u256(1);
m_context << Instruction::ADD;
}
}
else
{
if (m_context.arithmetic() == Arithmetic::Checked)
m_context.callYulFunction(m_context.utilFunctions().decrementCheckedFunction(type), 1, 1);
else
{
m_context << u256(1);
m_context << Instruction::SWAP1 << Instruction::SUB;
}
}
// Stack for prefix: [ref...] (*ref)+-1
// Stack for postfix: *ref [ref...] (*ref)+-1
for (unsigned i = m_currentLValue->sizeOnStack(); i > 0; --i)
m_context << swapInstruction(i);
m_currentLValue->storeValue(
*_unaryOperation.annotation().type, _unaryOperation.location(),
!_unaryOperation.isPrefixOperation());
m_currentLValue.reset();
break;
case Token::Add: // +
// unary add, so basically no-op
break;
case Token::Sub: // -
solUnimplementedAssert(
type.category() != Type::Category::FixedPoint,
"Not yet implemented - FixedPointType."
);
if (m_context.arithmetic() == Arithmetic::Checked)
m_context.callYulFunction(m_context.utilFunctions().negateNumberCheckedFunction(type), 1, 1);
else
m_context << u256(0) << Instruction::SUB;
break;
default:
solAssert(false, "Invalid unary operator: " + string(TokenTraits::toString(_unaryOperation.getOperator())));
}
return false;
}
bool ExpressionCompiler::visit(BinaryOperation const& _binaryOperation)
{
CompilerContext::LocationSetter locationSetter(m_context, _binaryOperation);
Expression const& leftExpression = _binaryOperation.leftExpression();
Expression const& rightExpression = _binaryOperation.rightExpression();
solAssert(!!_binaryOperation.annotation().commonType, "");
Type const* commonType = _binaryOperation.annotation().commonType;
Token const c_op = _binaryOperation.getOperator();
if (c_op == Token::And || c_op == Token::Or) // special case: short-circuiting
appendAndOrOperatorCode(_binaryOperation);
else if (commonType->category() == Type::Category::RationalNumber)
m_context << commonType->literalValue(nullptr);
else
{
bool cleanupNeeded = cleanupNeededForOp(commonType->category(), c_op, m_context.arithmetic());
Type const* leftTargetType = commonType;
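// For shifts and exponentiation the right operand is not converted to the common type;
// it is only brought to its own mobile type.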
Type const* rightTargetType =
TokenTraits::isShiftOp(c_op) || c_op == Token::Exp ?
rightExpression.annotation().type->mobileType() :
commonType;
solAssert(rightTargetType, "");
// for commutative operators, push the literal as late as possible to allow improved optimization
auto isLiteral = [](Expression const& _e)
{
return dynamic_cast<Literal const*>(&_e) || _e.annotation().type->category() == Type::Category::RationalNumber;
};
bool swap = m_optimiseOrderLiterals && TokenTraits::isCommutativeOp(c_op) && isLiteral(rightExpression) && !isLiteral(leftExpression);
if (swap)
{
acceptAndConvert(leftExpression, *leftTargetType, cleanupNeeded);
acceptAndConvert(rightExpression, *rightTargetType, cleanupNeeded);
}
else
{
acceptAndConvert(rightExpression, *rightTargetType, cleanupNeeded);
acceptAndConvert(leftExpression, *leftTargetType, cleanupNeeded);
}
if (TokenTraits::isShiftOp(c_op))
// shift only cares about the signedness of both sides
appendShiftOperatorCode(c_op, *leftTargetType, *rightTargetType);
else if (c_op == Token::Exp)
appendExpOperatorCode(*leftTargetType, *rightTargetType);
else if (TokenTraits::isCompareOp(c_op))
appendCompareOperatorCode(c_op, *commonType);
else
appendOrdinaryBinaryOperatorCode(c_op, *commonType);
}
// do not visit the child nodes, we already did that explicitly
return false;
}
bool ExpressionCompiler::visit(FunctionCall const& _functionCall)
{
auto functionCallKind = *_functionCall.annotation().kind;
CompilerContext::LocationSetter locationSetter(m_context, _functionCall);
if (functionCallKind == FunctionCallKind::TypeConversion)
{
solAssert(_functionCall.arguments().size() == 1, "");
solAssert(_functionCall.names().empty(), "");
auto const& expression = *_functionCall.arguments().front();
auto const& targetType = *_functionCall.annotation().type;
if (auto const* typeType = dynamic_cast<TypeType const*>(expression.annotation().type))
if (auto const* addressType = dynamic_cast<AddressType const*>(&targetType))
{
auto const* contractType = dynamic_cast<ContractType const*>(typeType->actualType());
solAssert(
contractType &&
contractType->contractDefinition().isLibrary() &&
addressType->stateMutability() == StateMutability::NonPayable,
""
);
m_context.appendLibraryAddress(contractType->contractDefinition().fullyQualifiedName());
return false;
}
acceptAndConvert(expression, targetType);
return false;
}
FunctionTypePointer functionType;
if (functionCallKind == FunctionCallKind::StructConstructorCall)
{
auto const& type = dynamic_cast<TypeType const&>(*_functionCall.expression().annotation().type);
auto const& structType = dynamic_cast<StructType const&>(*type.actualType());
functionType = structType.constructorType();
}
else
functionType = dynamic_cast<FunctionType const*>(_functionCall.expression().annotation().type);
TypePointers parameterTypes = functionType->parameterTypes();
vector<ASTPointer<Expression const>> const& arguments = _functionCall.sortedArguments();
if (functionCallKind == FunctionCallKind::StructConstructorCall)
{
TypeType const& type = dynamic_cast<TypeType const&>(*_functionCall.expression().annotation().type);
auto const& structType = dynamic_cast<StructType const&>(*type.actualType());
utils().allocateMemory(max(u256(32u), structType.memoryDataSize()));
m_context << Instruction::DUP1;
for (unsigned i = 0; i < arguments.size(); ++i)
{
acceptAndConvert(*arguments[i], *functionType->parameterTypes()[i]);
utils().storeInMemoryDynamic(*functionType->parameterTypes()[i]);
}
m_context << Instruction::POP;
}
else
{
FunctionType const& function = *functionType;
if (function.bound())
solAssert(
function.kind() == FunctionType::Kind::DelegateCall ||
function.kind() == FunctionType::Kind::Internal ||
function.kind() == FunctionType::Kind::ArrayPush ||
function.kind() == FunctionType::Kind::ArrayPop,
"");
switch (function.kind())
{
case FunctionType::Kind::Declaration:
solAssert(false, "Attempted to generate code for calling a function definition.");
break;
case FunctionType::Kind::Internal:
{
// Calling convention: Caller pushes return address and arguments
// Callee removes them and pushes return values
evmasm::AssemblyItem returnLabel = m_context.pushNewTag();
for (unsigned i = 0; i < arguments.size(); ++i)
acceptAndConvert(*arguments[i], *function.parameterTypes()[i]);
{
bool shortcutTaken = false;
if (auto identifier = dynamic_cast<Identifier const*>(&_functionCall.expression()))
{
solAssert(!function.bound(), "");
if (auto functionDef = dynamic_cast<FunctionDefinition const*>(identifier->annotation().referencedDeclaration))
{
// Do not visit the identifier directly; this way we avoid creating the
// runtime entry label in the creation-time context.
CompilerContext::LocationSetter locationSetter2(m_context, *identifier);
solAssert(*identifier->annotation().requiredLookup == VirtualLookup::Virtual, "");
utils().pushCombinedFunctionEntryLabel(
functionDef->resolveVirtual(m_context.mostDerivedContract()),
false
);
shortcutTaken = true;
}
}
if (!shortcutTaken)
_functionCall.expression().accept(*this);
}
unsigned parameterSize = CompilerUtils::sizeOnStack(function.parameterTypes());
if (function.bound())
{
// stack: arg2, ..., argn, label, arg1
unsigned depth = parameterSize + 1;
utils().moveIntoStack(depth, function.selfType()->sizeOnStack());
parameterSize += function.selfType()->sizeOnStack();
}
if (m_context.runtimeContext())
// We have a runtime context, so we need the creation part.
utils().rightShiftNumberOnStack(32);
else
// Extract the runtime part.
m_context << ((u256(1) << 32) - 1) << Instruction::AND;
m_context.appendJump(evmasm::AssemblyItem::JumpType::IntoFunction);
m_context << returnLabel;
unsigned returnParametersSize = CompilerUtils::sizeOnStack(function.returnParameterTypes());
// callee adds return parameters, but removes arguments and return label
m_context.adjustStackOffset(static_cast<int>(returnParametersSize - parameterSize) - 1);
break;
}
case FunctionType::Kind::BareCall:
case FunctionType::Kind::BareDelegateCall:
case FunctionType::Kind::BareStaticCall:
solAssert(!_functionCall.annotation().tryCall, "");
[[fallthrough]];
case FunctionType::Kind::External:
case FunctionType::Kind::DelegateCall:
_functionCall.expression().accept(*this);
appendExternalFunctionCall(function, arguments, _functionCall.annotation().tryCall);
break;
case FunctionType::Kind::BareCallCode:
solAssert(false, "Callcode has been removed.");
case FunctionType::Kind::Creation:
{
_functionCall.expression().accept(*this);
// Stack: [salt], [value]
solAssert(!function.gasSet(), "Gas limit set for contract creation.");
solAssert(function.returnParameterTypes().size() == 1, "");
TypePointers argumentTypes;
for (auto const& arg: arguments)
{
arg->accept(*this);
argumentTypes.push_back(arg->annotation().type);
}
ContractDefinition const* contract =
&dynamic_cast<ContractType const&>(*function.returnParameterTypes().front()).contractDefinition();
utils().fetchFreeMemoryPointer();
utils().copyContractCodeToMemory(*contract, true);
utils().abiEncode(argumentTypes, function.parameterTypes());
// now on stack: [salt], [value], memory_end_ptr
// need: [salt], size, offset, value
if (function.saltSet())
{
m_context << dupInstruction(2 + (function.valueSet() ? 1 : 0));
m_context << Instruction::SWAP1;
}
// now: [salt], [value], [salt], memory_end_ptr
utils().toSizeAfterFreeMemoryPointer();
// now: [salt], [value], [salt], size, offset
if (function.valueSet())
m_context << dupInstruction(3 + (function.saltSet() ? 1 : 0));
else
m_context << u256(0);
// now: [salt], [value], [salt], size, offset, value
if (function.saltSet())
m_context << Instruction::CREATE2;
else
m_context << Instruction::CREATE;
// now: [salt], [value], address
if (function.valueSet())
m_context << swapInstruction(1) << Instruction::POP;
if (function.saltSet())
m_context << swapInstruction(1) << Instruction::POP;
// Check if zero (reverted)
m_context << Instruction::DUP1 << Instruction::ISZERO;
if (_functionCall.annotation().tryCall)
{
// If this is a try call, return "<address> 1" in the success case and
// "0" in the error case.
AssemblyItem errorCase = m_context.appendConditionalJump();
m_context << u256(1);
m_context << errorCase;
}
else
m_context.appendConditionalRevert(true);
break;
}
case FunctionType::Kind::SetGas:
{
// stack layout: contract_address function_id [gas] [value]
_functionCall.expression().accept(*this);
acceptAndConvert(*arguments.front(), *TypeProvider::uint256(), true);
// Note that function is not the original function, but the ".gas" function.
// Its values of gasSet and valueSet are equal to the original function's, though.
unsigned stackDepth = (function.gasSet() ? 1u : 0u) + (function.valueSet() ? 1u : 0u);
if (stackDepth > 0)
m_context << swapInstruction(stackDepth);
if (function.gasSet())
m_context << Instruction::POP;
break;
}
case FunctionType::Kind::SetValue:
// stack layout: contract_address function_id [gas] [value]
_functionCall.expression().accept(*this);
// Note that function is not the original function, but the ".value" function.
// Its values of gasSet and valueSet are equal to the original function's, though.
if (function.valueSet())
m_context << Instruction::POP;
arguments.front()->accept(*this);
break;
case FunctionType::Kind::Send:
case FunctionType::Kind::Transfer:
{
_functionCall.expression().accept(*this);
// Provide the gas stipend manually at first because we may send zero ether.
// Will be zeroed if we send more than zero ether.
m_context << u256(evmasm::GasCosts::callStipend);
acceptAndConvert(*arguments.front(), *function.parameterTypes().front(), true);
// gas <- gas * !value
m_context << Instruction::SWAP1 << Instruction::DUP2;
m_context << Instruction::ISZERO << Instruction::MUL << Instruction::SWAP1;
FunctionType::Options callOptions;
callOptions.valueSet = true;
callOptions.gasSet = true;
appendExternalFunctionCall(
FunctionType(
TypePointers{},
TypePointers{},
strings(),
strings(),
FunctionType::Kind::BareCall,
StateMutability::NonPayable,
nullptr,
callOptions
),
{},
false
);
if (function.kind() == FunctionType::Kind::Transfer)
{
// Check if zero (out of stack or not enough balance).
m_context << Instruction::ISZERO;
// Revert message bubbles up.
m_context.appendConditionalRevert(true);
}
break;
}
case FunctionType::Kind::Selfdestruct:
acceptAndConvert(*arguments.front(), *function.parameterTypes().front(), true);
m_context << Instruction::SELFDESTRUCT;
break;
case FunctionType::Kind::Revert:
{
if (arguments.empty())
m_context.appendRevert();
else
{
// function-sel(Error(string)) + encoding
solAssert(arguments.size() == 1, "");
solAssert(function.parameterTypes().size() == 1, "");
if (m_context.revertStrings() == RevertStrings::Strip)
{
if (!*arguments.front()->annotation().isPure)
{
arguments.front()->accept(*this);
utils().popStackElement(*arguments.front()->annotation().type);
}
m_context.appendRevert();
}
else
{
arguments.front()->accept(*this);
utils().revertWithStringData(*arguments.front()->annotation().type);
}
}
break;
}
case FunctionType::Kind::KECCAK256:
{
solAssert(arguments.size() == 1, "");
solAssert(!function.padArguments(), "");
Type const* argType = arguments.front()->annotation().type;
solAssert(argType, "");
arguments.front()->accept(*this);
if (auto const* stringLiteral = dynamic_cast<StringLiteralType const*>(argType))
// Optimization: Compute keccak256 on string literals at compile-time.
m_context << u256(keccak256(stringLiteral->value()));
else if (*argType == *TypeProvider::bytesMemory() || *argType == *TypeProvider::stringMemory())
{
// Optimization: If type is bytes or string, then do not encode,
// but directly compute keccak256 on memory.
ArrayUtils(m_context).retrieveLength(*TypeProvider::bytesMemory());
m_context << Instruction::SWAP1 << u256(0x20) << Instruction::ADD;
m_context << Instruction::KECCAK256;
}
else
{
utils().fetchFreeMemoryPointer();
utils().packedEncode({argType}, TypePointers());
utils().toSizeAfterFreeMemoryPointer();
m_context << Instruction::KECCAK256;
}
break;
}
case FunctionType::Kind::Event:
{
_functionCall.expression().accept(*this);
auto const& event = dynamic_cast<EventDefinition const&>(function.declaration());
unsigned numIndexed = 0;
TypePointers paramTypes = function.parameterTypes();
// All indexed arguments go to the stack
for (size_t arg = arguments.size(); arg > 0; --arg)
if (event.parameters()[arg - 1]->isIndexed())
{
++numIndexed;
arguments[arg - 1]->accept(*this);
if (auto const& referenceType = dynamic_cast<ReferenceType const*>(paramTypes[arg - 1]))
{
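// Indexed arguments of reference type are not passed as topics directly;
// the topic is the keccak256 hash of their packed encoding.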
utils().fetchFreeMemoryPointer();
utils().packedEncode(
{arguments[arg - 1]->annotation().type},
{referenceType}
);
utils().toSizeAfterFreeMemoryPointer();
m_context << Instruction::KECCAK256;
}
else
{
solAssert(paramTypes[arg - 1]->isValueType(), "");
if (auto functionType = dynamic_cast<FunctionType const*>(paramTypes[arg - 1]))
{
auto argumentType =
dynamic_cast<FunctionType const*>(arguments[arg-1]->annotation().type);
solAssert(
argumentType &&
functionType->kind() == FunctionType::Kind::External &&
argumentType->kind() == FunctionType::Kind::External &&
!argumentType->bound(),
""
);
utils().combineExternalFunctionType(true);
}
else
utils().convertType(
*arguments[arg - 1]->annotation().type,
*paramTypes[arg - 1],
true
);
}
}
if (!event.isAnonymous())
{
m_context << u256(h256::Arith(keccak256(function.externalSignature())));
++numIndexed;
}
solAssert(numIndexed <= 4, "Too many indexed arguments.");
// Copy all non-indexed arguments to memory (data)
// Memory position is only a hack and should be removed once we have free memory pointer.
TypePointers nonIndexedArgTypes;
TypePointers nonIndexedParamTypes;
for (unsigned arg = 0; arg < arguments.size(); ++arg)
if (!event.parameters()[arg]->isIndexed())
{
arguments[arg]->accept(*this);
nonIndexedArgTypes.push_back(arguments[arg]->annotation().type);
nonIndexedParamTypes.push_back(paramTypes[arg]);
}
utils().fetchFreeMemoryPointer();
utils().abiEncode(nonIndexedArgTypes, nonIndexedParamTypes);
// need: topic1 ... topicn memsize memstart
utils().toSizeAfterFreeMemoryPointer();
m_context << logInstruction(numIndexed);
break;
}
case FunctionType::Kind::Error:
{
_functionCall.expression().accept(*this);
vector<Type const*> argumentTypes;
for (ASTPointer<Expression const> const& arg: _functionCall.sortedArguments())
{
arg->accept(*this);
argumentTypes.push_back(arg->annotation().type);
}
solAssert(dynamic_cast<ErrorDefinition const*>(&function.declaration()), "");
utils().revertWithError(
function.externalSignature(),
function.parameterTypes(),
argumentTypes
);
break;
}
case FunctionType::Kind::Wrap:
case FunctionType::Kind::Unwrap:
{
solAssert(arguments.size() == 1, "");
Type const* argumentType = arguments.at(0)->annotation().type;
Type const* functionCallType = _functionCall.annotation().type;
solAssert(argumentType, "");
solAssert(functionCallType, "");
FunctionType::Kind kind = functionType->kind();
if (kind == FunctionType::Kind::Wrap)
{
solAssert(
argumentType->isImplicitlyConvertibleTo(
dynamic_cast<UserDefinedValueType const&>(*functionCallType).underlyingType()
),
""
);
solAssert(argumentType->isImplicitlyConvertibleTo(*function.parameterTypes()[0]), "");
}
else
solAssert(
dynamic_cast<UserDefinedValueType const&>(*argumentType) ==
dynamic_cast<UserDefinedValueType const&>(*function.parameterTypes()[0]),
""
);
acceptAndConvert(*arguments[0], *function.parameterTypes()[0]);
break;
}
case FunctionType::Kind::BlockHash:
{
acceptAndConvert(*arguments[0], *function.parameterTypes()[0], true);
m_context << Instruction::BLOCKHASH;
break;
}
case FunctionType::Kind::AddMod:
case FunctionType::Kind::MulMod:
{
acceptAndConvert(*arguments[2], *TypeProvider::uint256());
m_context << Instruction::DUP1 << Instruction::ISZERO;
m_context.appendConditionalPanic(util::PanicCode::DivisionByZero);
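// The modulus was evaluated first and checked to be non-zero; now push the two
// operands so that they end up above it, as expected by ADDMOD / MULMOD.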
for (unsigned i = 1; i < 3; i ++)
acceptAndConvert(*arguments[2 - i], *TypeProvider::uint256());
if (function.kind() == FunctionType::Kind::AddMod)
m_context << Instruction::ADDMOD;
else
m_context << Instruction::MULMOD;
break;
}
case FunctionType::Kind::ECRecover:
case FunctionType::Kind::SHA256:
case FunctionType::Kind::RIPEMD160:
{
_functionCall.expression().accept(*this);
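// Addresses of the precompiled contracts implementing these builtins
// (ecrecover = 1, sha256 = 2, ripemd160 = 3).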
static map<FunctionType::Kind, u256> const contractAddresses{
{FunctionType::Kind::ECRecover, 1},
{FunctionType::Kind::SHA256, 2},
{FunctionType::Kind::RIPEMD160, 3}
};
m_context << contractAddresses.at(function.kind());
for (unsigned i = function.sizeOnStack(); i > 0; --i)
m_context << swapInstruction(i);
solAssert(!_functionCall.annotation().tryCall, "");
appendExternalFunctionCall(function, arguments, false);
break;
}
case FunctionType::Kind::ArrayPush:
{
solAssert(function.bound(), "");
_functionCall.expression().accept(*this);
if (function.parameterTypes().size() == 0)
{
auto paramType = function.returnParameterTypes().at(0);
solAssert(paramType, "");
ArrayType const* arrayType = dynamic_cast<ArrayType const*>(function.selfType());
solAssert(arrayType, "");
// stack: ArrayReference
m_context << u256(1) << Instruction::DUP2;
ArrayUtils(m_context).incrementDynamicArraySize(*arrayType);
// stack: ArrayReference 1 newLength
m_context << Instruction::SUB;
// stack: ArrayReference (newLength-1)
ArrayUtils(m_context).accessIndex(*arrayType, false);
if (arrayType->isByteArray())
setLValue(_functionCall);
else
setLValueToStorageItem(_functionCall);
}
else
{
solAssert(function.parameterTypes().size() == 1, "");
solAssert(!!function.parameterTypes()[0], "");
Type const* paramType = function.parameterTypes()[0];
ArrayType const* arrayType = dynamic_cast<ArrayType const*>(function.selfType());
solAssert(arrayType, "");
// stack: ArrayReference
arguments[0]->accept(*this);
Type const* argType = arguments[0]->annotation().type;
// stack: ArrayReference argValue
utils().moveToStackTop(argType->sizeOnStack(), 1);
// stack: argValue ArrayReference
m_context << Instruction::DUP1;
ArrayUtils(m_context).incrementDynamicArraySize(*arrayType);
// stack: argValue ArrayReference newLength
m_context << u256(1) << Instruction::SWAP1 << Instruction::SUB;
// stack: argValue ArrayReference (newLength-1)
ArrayUtils(m_context).accessIndex(*arrayType, false);
// stack: argValue storageSlot slotOffset
utils().moveToStackTop(2, argType->sizeOnStack());
// stack: storageSlot slotOffset argValue
Type const* type =
arrayType->baseType()->dataStoredIn(DataLocation::Storage) ?
arguments[0]->annotation().type->mobileType() :
arrayType->baseType();
solAssert(type, "");
utils().convertType(*argType, *type);
utils().moveToStackTop(1 + type->sizeOnStack());
utils().moveToStackTop(1 + type->sizeOnStack());
// stack: argValue storageSlot slotOffset
if (!arrayType->isByteArray())
StorageItem(m_context, *paramType).storeValue(*type, _functionCall.location(), true);
else
StorageByteArrayElement(m_context).storeValue(*type, _functionCall.location(), true);
}
break;
}
case FunctionType::Kind::ArrayPop:
{
_functionCall.expression().accept(*this);
solAssert(function.bound(), "");
solAssert(function.parameterTypes().empty(), "");
ArrayType const* arrayType = dynamic_cast<ArrayType const*>(function.selfType());
solAssert(arrayType && arrayType->dataStoredIn(DataLocation::Storage), "");
ArrayUtils(m_context).popStorageArrayElement(*arrayType);
break;
}
case FunctionType::Kind::BytesConcat:
{
_functionCall.expression().accept(*this);
vector<Type const*> argumentTypes;
vector<Type const*> targetTypes;
for (auto const& argument: arguments)
{
argument->accept(*this);
solAssert(argument->annotation().type, "");
argumentTypes.emplace_back(argument->annotation().type);
if (argument->annotation().type->category() == Type::Category::FixedBytes)
targetTypes.emplace_back(argument->annotation().type);
else if (
auto const* literalType = dynamic_cast<StringLiteralType const*>(argument->annotation().type);
literalType && !literalType->value().empty() && literalType->value().size() <= 32
)
targetTypes.emplace_back(TypeProvider::fixedBytes(static_cast<unsigned>(literalType->value().size())));
else
{
solAssert(!dynamic_cast<RationalNumberType const*>(argument->annotation().type), "");
solAssert(argument->annotation().type->isImplicitlyConvertibleTo(*TypeProvider::bytesMemory()), "");
targetTypes.emplace_back(TypeProvider::bytesMemory());
}
}
utils().fetchFreeMemoryPointer();
// stack: <arg1> <arg2> ... <argn> <free mem ptr>
m_context << u256(32) << Instruction::ADD;
utils().packedEncode(argumentTypes, targetTypes);
utils().fetchFreeMemoryPointer();
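// Store the length of the result (end minus start minus the 32-byte length slot)
// at the start of the concatenated byte array.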
m_context.appendInlineAssembly(R"({
mstore(mem_ptr, sub(sub(mem_end, mem_ptr), 0x20))
})", {"mem_end", "mem_ptr"});
m_context << Instruction::SWAP1;
utils().storeFreeMemoryPointer();
break;
}
case FunctionType::Kind::ObjectCreation:
{
ArrayType const& arrayType = dynamic_cast<ArrayType const&>(*_functionCall.annotation().type);
_functionCall.expression().accept(*this);
solAssert(arguments.size() == 1, "");
// Fetch requested length.
acceptAndConvert(*arguments[0], *TypeProvider::uint256());
// Make sure we can allocate memory without overflow
m_context << u256(0xffffffffffffffff);
m_context << Instruction::DUP2;
m_context << Instruction::GT;
m_context.appendConditionalPanic(PanicCode::ResourceError);
// Stack: requested_length
utils().fetchFreeMemoryPointer();
// Stack: requested_length memptr
m_context << Instruction::SWAP1;
// Stack: memptr requested_length
// store length
m_context << Instruction::DUP1 << Instruction::DUP3 << Instruction::MSTORE;
// Stack: memptr requested_length
// update free memory pointer
m_context << Instruction::DUP1;
// Stack: memptr requested_length requested_length
if (arrayType.isByteArray())
// Round up to multiple of 32
m_context << u256(31) << Instruction::ADD << u256(31) << Instruction::NOT << Instruction::AND;
else
m_context << arrayType.baseType()->memoryHeadSize() << Instruction::MUL;
// Stack: memptr requested_length data_size
m_context << u256(32) << Instruction::ADD;
m_context << Instruction::DUP3 << Instruction::ADD;
utils().storeFreeMemoryPointer();
// Stack: memptr requested_length
// Check if length is zero
m_context << Instruction::DUP1 << Instruction::ISZERO;
auto skipInit = m_context.appendConditionalJump();
// Always initialize because the free memory pointer might point at
// a dirty memory area.
m_context << Instruction::DUP2 << u256(32) << Instruction::ADD;
utils().zeroInitialiseMemoryArray(arrayType);
m_context << skipInit;
m_context << Instruction::POP;
break;
}
case FunctionType::Kind::Assert:
case FunctionType::Kind::Require:
{
acceptAndConvert(*arguments.front(), *function.parameterTypes().front(), false);
bool haveReasonString = arguments.size() > 1 && m_context.revertStrings() != RevertStrings::Strip;
if (arguments.size() > 1)
{
// Users probably expect the second argument to be evaluated
// even if the condition is false, as would be the case for an actual
// function call.
solAssert(arguments.size() == 2, "");
solAssert(function.kind() == FunctionType::Kind::Require, "");
if (m_context.revertStrings() == RevertStrings::Strip)
{
if (!*arguments.at(1)->annotation().isPure)
{
arguments.at(1)->accept(*this);
utils().popStackElement(*arguments.at(1)->annotation().type);
}
}
else
{
arguments.at(1)->accept(*this);
utils().moveIntoStack(1, arguments.at(1)->annotation().type->sizeOnStack());
}
}
// Stack: <error string (unconverted)> <condition>
// jump if condition was met
m_context << Instruction::ISZERO << Instruction::ISZERO;
auto success = m_context.appendConditionalJump();
if (function.kind() == FunctionType::Kind::Assert)
// condition was not met, flag an error
m_context.appendPanic(util::PanicCode::Assert);
else if (haveReasonString)
{
utils().revertWithStringData(*arguments.at(1)->annotation().type);
// Here, the argument is consumed, but in the other branch, it is still there.
m_context.adjustStackOffset(static_cast<int>(arguments.at(1)->annotation().type->sizeOnStack()));
}
else
m_context.appendRevert();
// the success branch
m_context << success;
if (haveReasonString)
utils().popStackElement(*arguments.at(1)->annotation().type);
break;
}
case FunctionType::Kind::ABIEncode:
case FunctionType::Kind::ABIEncodePacked:
case FunctionType::Kind::ABIEncodeWithSelector:
case FunctionType::Kind::ABIEncodeCall:
case FunctionType::Kind::ABIEncodeWithSignature:
{
bool const isPacked = function.kind() == FunctionType::Kind::ABIEncodePacked;
bool const hasSelectorOrSignature =
function.kind() == FunctionType::Kind::ABIEncodeWithSelector ||
function.kind() == FunctionType::Kind::ABIEncodeCall ||
function.kind() == FunctionType::Kind::ABIEncodeWithSignature;
TypePointers argumentTypes;
ASTNode::listAccept(arguments, *this);
if (function.kind() == FunctionType::Kind::ABIEncodeCall)
{
solAssert(arguments.size() == 2);
auto const functionPtr = dynamic_cast<FunctionType const*>(arguments[0]->annotation().type);
solAssert(functionPtr);
// Account for tuples with one component which become that component
if (auto const tupleType = dynamic_cast<TupleType const*>(arguments[1]->annotation().type))
argumentTypes = tupleType->components();
else
argumentTypes.emplace_back(arguments[1]->annotation().type);
}
else
for (unsigned i = 0; i < arguments.size(); ++i)
{
// Do not keep the selector as part of the ABI encoded args
if (!hasSelectorOrSignature || i > 0)
argumentTypes.push_back(arguments[i]->annotation().type);
}
utils().fetchFreeMemoryPointer();
// stack now: [<selector>] <arg1> .. <argn> <free_mem>
// adjust by 32(+4) bytes to accommodate the length(+selector)
m_context << u256(32 + (hasSelectorOrSignature ? 4 : 0)) << Instruction::ADD;
// stack now: [<selector>] <arg1> .. <argn> <data_encoding_area_start>
if (isPacked)
{
solAssert(!function.padArguments(), "");
utils().packedEncode(argumentTypes, TypePointers());
}
else
{
solAssert(function.padArguments(), "");
utils().abiEncode(argumentTypes, TypePointers());
}
utils().fetchFreeMemoryPointer();
// stack: [<selector>] <data_encoding_area_start> <data_encoding_area_end>
// size is end minus start minus length slot
m_context.appendInlineAssembly(R"({
mstore(mem_ptr, sub(sub(mem_end, mem_ptr), 0x20))
})", {"mem_end", "mem_ptr"});
m_context << Instruction::SWAP1;
utils().storeFreeMemoryPointer();
// stack: [<selector>] <memory pointer>
if (hasSelectorOrSignature)
{
// stack: <selector> <memory pointer>
solAssert(arguments.size() >= 1, "");
Type const* selectorType = arguments[0]->annotation().type;
utils().moveIntoStack(selectorType->sizeOnStack());
Type const* dataOnStack = selectorType;
// stack: <memory pointer> <selector>
if (function.kind() == FunctionType::Kind::ABIEncodeWithSignature)
{
// hash the signature
if (auto const* stringType = dynamic_cast<StringLiteralType const*>(selectorType))
{
m_context << util::selectorFromSignature(stringType->value());
dataOnStack = TypeProvider::fixedBytes(4);
}
else
{
utils().fetchFreeMemoryPointer();
// stack: <memory pointer> <signature> <free mem ptr>
utils().packedEncode(TypePointers{selectorType}, TypePointers());
utils().toSizeAfterFreeMemoryPointer();
m_context << Instruction::KECCAK256;
// stack: <memory pointer> <hash>
dataOnStack = TypeProvider::fixedBytes(32);
}
}
else if (function.kind() == FunctionType::Kind::ABIEncodeCall)
{
auto const& funType = dynamic_cast<FunctionType const&>(*selectorType);
if (funType.kind() == FunctionType::Kind::Declaration)
{
solAssert(funType.hasDeclaration());
solAssert(selectorType->sizeOnStack() == 0);
m_context << funType.externalIdentifier();
}
else
{
solAssert(selectorType->sizeOnStack() == 2);
// stack: <memory pointer> <function pointer>
// Extract selector from the stack
m_context << Instruction::SWAP1 << Instruction::POP;
}
// Conversion will be done below
dataOnStack = TypeProvider::uint(32);
}
else
solAssert(function.kind() == FunctionType::Kind::ABIEncodeWithSelector, "");
utils().convertType(*dataOnStack, FixedBytesType(4), true);
// stack: <memory pointer> <selector>
// load current memory, mask and combine the selector
string mask = formatNumber((u256(-1) >> 32));
m_context.appendInlineAssembly(R"({
let data_start := add(mem_ptr, 0x20)
let data := mload(data_start)
let mask := )" + mask + R"(
mstore(data_start, or(and(data, mask), selector))
})", {"mem_ptr", "selector"});
m_context << Instruction::POP;
}
// stack now: <memory pointer>
break;
}
case FunctionType::Kind::ABIDecode:
{
arguments.front()->accept(*this);
Type const* firstArgType = arguments.front()->annotation().type;
TypePointers targetTypes;
if (TupleType const* targetTupleType = dynamic_cast<TupleType const*>(_functionCall.annotation().type))
targetTypes = targetTupleType->components();
else
targetTypes = TypePointers{_functionCall.annotation().type};
if (
auto referenceType = dynamic_cast<ReferenceType const*>(firstArgType);
referenceType && referenceType->dataStoredIn(DataLocation::CallData)
)
{
solAssert(referenceType->isImplicitlyConvertibleTo(*TypeProvider::bytesCalldata()), "");
utils().convertType(*referenceType, *TypeProvider::bytesCalldata());
utils().abiDecode(targetTypes, false);
}
else
{
utils().convertType(*firstArgType, *TypeProvider::bytesMemory());
m_context << Instruction::DUP1 << u256(32) << Instruction::ADD;
m_context << Instruction::SWAP1 << Instruction::MLOAD;
// stack now: <mem_pos> <length>
utils().abiDecode(targetTypes, true);
}
break;
}
case FunctionType::Kind::GasLeft:
m_context << Instruction::GAS;
break;
case FunctionType::Kind::MetaType:
// No code to generate.
break;
}
}
return false;
}
bool ExpressionCompiler::visit(FunctionCallOptions const& _functionCallOptions)
{
_functionCallOptions.expression().accept(*this);
// Desired Stack: [salt], [gas], [value]
enum Option { Salt, Gas, Value };
vector