diff --git a/docs/yul.rst b/docs/yul.rst index 281cb8d63..0e30a9082 100644 --- a/docs/yul.rst +++ b/docs/yul.rst @@ -952,6 +952,20 @@ option. See :ref:`Using the Commandline Compiler ` for details about the Solidity linker. +memoryguard +^^^^^^^^^^^ + +This function is available in the EVM dialect with objects. The caller of +``let ptr := memoryguard(size)`` promises that they only use memory in either +the range ``[0, size)`` or the unbounded range above ``ptr``. The Yul optimizer +promises to only use the memory range ``[size, ptr)`` for its purposes. +If the optimizer does not need to reserve any memory, it holds that ``ptr := size``. + +``memoryguard`` can be called multiple times, but needs to have the same literal as argument +within one Yul subobject. If at least one ``memoryguard`` call is found in a subobject, +the Yul optimiser will try to perform experimental steps like the stack limit evader, +which attempts to move stack variables that would otherwise be unreachable +to memory. .. _yul-object: diff --git a/libsolidity/codegen/ir/IRGenerationContext.h b/libsolidity/codegen/ir/IRGenerationContext.h index a833fdfa5..8f102dfda 100644 --- a/libsolidity/codegen/ir/IRGenerationContext.h +++ b/libsolidity/codegen/ir/IRGenerationContext.h @@ -142,6 +142,9 @@ public: std::set& subObjectsCreated() { return m_subObjects; } + bool inlineAssemblySeen() const { return m_inlineAssemblySeen; } + void setInlineAssemblySeen() { m_inlineAssemblySeen = true; } + private: langutil::EVMVersion m_evmVersion; RevertStrings m_revertStrings; @@ -159,6 +162,9 @@ private: MultiUseYulFunctionCollector m_functions; size_t m_varCounter = 0; + /// Flag indicating whether any inline assembly block was seen. + bool m_inlineAssemblySeen = false; + /// Function definitions queued for code generation. They're the Solidity functions whose calls /// were discovered by the IR generator during AST traversal. 
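For illustration of the ``memoryguard`` contract documented in the ``yul.rst`` hunk above, a minimal hand-written Yul object honouring it might look as follows (a sketch; the size ``0x80`` and all names are invented and not part of this change):

    object "demo" {
        code {
            // caller promise: only touch [0, 0x80) and memory from ptr upwards
            let ptr := memoryguard(0x80)
            mstore(0x00, 1)        // inside [0, 0x80)
            mstore(ptr, 2)         // at the guarded free pointer
            sstore(0, mload(ptr))
            // the optimiser may claim [0x80, ptr) for its own use;
            // if it needs nothing, ptr is simply 0x80
        }
    }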
/// Note that the queue gets filled in a lazy way - new definitions can be added while the diff --git a/libsolidity/codegen/ir/IRGenerator.cpp b/libsolidity/codegen/ir/IRGenerator.cpp index 151b8c9fd..54098cc8f 100644 --- a/libsolidity/codegen/ir/IRGenerator.cpp +++ b/libsolidity/codegen/ir/IRGenerator.cpp @@ -92,7 +92,7 @@ string IRGenerator::generate( Whiskers t(R"( object "" { code { - + let := () @@ -103,7 +103,7 @@ string IRGenerator::generate( } object "" { code { - + } @@ -118,7 +118,6 @@ string IRGenerator::generate( m_context.registerImmutableVariable(*var); t("CreationObject", IRNames::creationObject(_contract)); - t("memoryInit", memoryInit()); t("notLibrary", !_contract.isLibrary()); FunctionDefinition const* constructor = _contract.constructor(); @@ -143,6 +142,7 @@ string IRGenerator::generate( InternalDispatchMap internalDispatchMap = generateInternalDispatchFunctions(); t("functions", m_context.functionCollector().requestedFunctions()); t("subObjects", subObjectSources(m_context.subObjectsCreated())); + t("memoryInitCreation", memoryInit(!m_context.inlineAssemblySeen())); resetContext(_contract); @@ -158,6 +158,7 @@ string IRGenerator::generate( generateInternalDispatchFunctions(); t("runtimeFunctions", m_context.functionCollector().requestedFunctions()); t("runtimeSubObjects", subObjectSources(m_context.subObjectsCreated())); + t("memoryInitRuntime", memoryInit(!m_context.inlineAssemblySeen())); return t.render(); } @@ -651,16 +652,22 @@ string IRGenerator::dispatchRoutine(ContractDefinition const& _contract) return t.render(); } -string IRGenerator::memoryInit() +string IRGenerator::memoryInit(bool _useMemoryGuard) { // This function should be called at the beginning of the EVM call frame // and thus can assume all memory to be zero, including the contents of // the "zero memory area" (the position CompilerUtils::zeroPointer points to). return - Whiskers{"mstore(, )"} + Whiskers{ + _useMemoryGuard ? + "mstore(, memoryguard())" : + "mstore(, )" + } ("memPtr", to_string(CompilerUtils::freeMemoryPointer)) - ("freeMemoryStart", to_string(CompilerUtils::generalPurposeMemoryStart + m_context.reservedMemory())) - .render(); + ( + "freeMemoryStart", + to_string(CompilerUtils::generalPurposeMemoryStart + m_context.reservedMemory()) + ).render(); } void IRGenerator::resetContext(ContractDefinition const& _contract) diff --git a/libsolidity/codegen/ir/IRGenerator.h b/libsolidity/codegen/ir/IRGenerator.h index e57b8535e..94433912d 100644 --- a/libsolidity/codegen/ir/IRGenerator.h +++ b/libsolidity/codegen/ir/IRGenerator.h @@ -100,7 +100,9 @@ private: std::string dispatchRoutine(ContractDefinition const& _contract); - std::string memoryInit(); + /// @a _useMemoryGuard If true, use a memory guard, allowing the optimiser + /// to perform memory optimizations. 
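As a sketch of what the two template branches of ``memoryInit`` emit (with ``freeMemoryPointer`` at 64, ``generalPurposeMemoryStart`` at 128 and assuming ``reservedMemory()`` is zero, as in the test outputs further down):

    mstore(64, memoryguard(128))   // emitted when no inline assembly block was seen
    mstore(64, 128)                // emitted otherwise: previous behaviour kept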
+ std::string memoryInit(bool _useMemoryGuard); void resetContext(ContractDefinition const& _contract); diff --git a/libsolidity/codegen/ir/IRGeneratorForStatements.cpp b/libsolidity/codegen/ir/IRGeneratorForStatements.cpp index 176f876bd..548351c60 100644 --- a/libsolidity/codegen/ir/IRGeneratorForStatements.cpp +++ b/libsolidity/codegen/ir/IRGeneratorForStatements.cpp @@ -1858,6 +1858,7 @@ void IRGeneratorForStatements::endVisit(MemberAccess const& _memberAccess) bool IRGeneratorForStatements::visit(InlineAssembly const& _inlineAsm) { setLocation(_inlineAsm); + m_context.setInlineAssemblySeen(); CopyTranslate bodyCopier{_inlineAsm.dialect(), m_context, _inlineAsm.annotation().externalReferences}; yul::Statement modified = bodyCopier(_inlineAsm.operations()); diff --git a/libyul/CMakeLists.txt b/libyul/CMakeLists.txt index bbc52d8f2..a6d41bb07 100644 --- a/libyul/CMakeLists.txt +++ b/libyul/CMakeLists.txt @@ -105,6 +105,8 @@ add_library(yul optimiser/ForLoopInitRewriter.h optimiser/FullInliner.cpp optimiser/FullInliner.h + optimiser/FunctionCallFinder.cpp + optimiser/FunctionCallFinder.h optimiser/FunctionGrouper.cpp optimiser/FunctionGrouper.h optimiser/FunctionHoister.cpp @@ -150,6 +152,10 @@ add_library(yul optimiser/SimplificationRules.h optimiser/StackCompressor.cpp optimiser/StackCompressor.h + optimiser/StackLimitEvader.cpp + optimiser/StackLimitEvader.h + optimiser/StackToMemoryMover.cpp + optimiser/StackToMemoryMover.h optimiser/StructuralSimplifier.cpp optimiser/StructuralSimplifier.h optimiser/Substitution.cpp diff --git a/libyul/CompilabilityChecker.cpp b/libyul/CompilabilityChecker.cpp index 2a99e3c79..4b1a99a74 100644 --- a/libyul/CompilabilityChecker.cpp +++ b/libyul/CompilabilityChecker.cpp @@ -33,7 +33,7 @@ using namespace solidity; using namespace solidity::yul; using namespace solidity::util; -map CompilabilityChecker::run( +CompilabilityChecker::CompilabilityChecker( Dialect const& _dialect, Object const& _object, bool _optimizeStackAllocation @@ -63,12 +63,11 @@ map CompilabilityChecker::run( ); transform(*_object.code); - std::map functions; for (StackTooDeepError const& error: transform.stackErrors()) - functions[error.functionName] = max(error.depth, functions[error.functionName]); - - return functions; + { + unreachableVariables[error.functionName].emplace(error.variable); + int& deficit = stackDeficit[error.functionName]; + deficit = std::max(error.depth, deficit); + } } - else - return {}; } diff --git a/libyul/CompilabilityChecker.h b/libyul/CompilabilityChecker.h index 1267640bf..ba9191e28 100644 --- a/libyul/CompilabilityChecker.h +++ b/libyul/CompilabilityChecker.h @@ -33,22 +33,20 @@ namespace solidity::yul /** * Component that checks whether all variables are reachable on the stack and - * returns a mapping from function name to the largest stack difference found - * in that function (no entry present if that function is compilable). + * provides a mapping from function name to the largest stack difference found + * in that function (no entry present if that function is compilable), as well + * as the set of unreachable variables for each function. * * This only works properly if the outermost block is compilable and * functions are not nested. Otherwise, it might miss reporting some functions. * * Only checks the code of the object itself, does not descend into sub-objects. 
*/ -class CompilabilityChecker +struct CompilabilityChecker { -public: - static std::map run( - Dialect const& _dialect, - Object const& _object, - bool _optimizeStackAllocation - ); + CompilabilityChecker(Dialect const& _dialect, Object const& _object, bool _optimizeStackAllocation); + std::map> unreachableVariables; + std::map stackDeficit; }; } diff --git a/libyul/backends/evm/EVMDialect.cpp b/libyul/backends/evm/EVMDialect.cpp index dc648248b..2c3b6be6d 100644 --- a/libyul/backends/evm/EVMDialect.cpp +++ b/libyul/backends/evm/EVMDialect.cpp @@ -142,6 +142,23 @@ map createBuiltins(langutil::EVMVersion _evmVe Expression const& arg = _call.arguments.front(); _assembly.appendLinkerSymbol(std::get(arg).value.str()); })); + + builtins.emplace(createFunction( + "memoryguard", + 1, + 1, + SideEffects{}, + {LiteralKind::Number}, + []( + FunctionCall const& _call, + AbstractAssembly& _assembly, + BuiltinContext&, + function _visitExpression + ) { + visitArguments(_assembly, _call, _visitExpression); + }) + ); + builtins.emplace(createFunction("datasize", 1, 1, SideEffects{}, {LiteralKind::String}, []( FunctionCall const& _call, AbstractAssembly& _assembly, diff --git a/libyul/backends/wasm/EVMToEwasmTranslator.cpp b/libyul/backends/wasm/EVMToEwasmTranslator.cpp index 4495ea13b..4f9b0ed08 100644 --- a/libyul/backends/wasm/EVMToEwasmTranslator.cpp +++ b/libyul/backends/wasm/EVMToEwasmTranslator.cpp @@ -1211,6 +1211,9 @@ function revert(x1, x2, x3, x4, y1, y2, y3, y4) { function invalid() { unreachable() } +function memoryguard(x:i64) -> y1, y2, y3, y4 { + y4 := x +} } )"}; diff --git a/libyul/optimiser/FunctionCallFinder.cpp b/libyul/optimiser/FunctionCallFinder.cpp new file mode 100644 index 000000000..d5a6afcc9 --- /dev/null +++ b/libyul/optimiser/FunctionCallFinder.cpp @@ -0,0 +1,39 @@ +/* + This file is part of solidity. + + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ + +#include +#include + +using namespace std; +using namespace solidity; +using namespace solidity::yul; + +vector FunctionCallFinder::run(Block& _block, YulString _functionName) +{ + FunctionCallFinder functionCallFinder(_functionName); + functionCallFinder(_block); + return functionCallFinder.m_calls; +} + +FunctionCallFinder::FunctionCallFinder(YulString _functionName): m_functionName(_functionName) {} + +void FunctionCallFinder::operator()(FunctionCall& _functionCall) +{ + ASTModifier::operator()(_functionCall); + if (_functionCall.functionName.name == m_functionName) + m_calls.emplace_back(&_functionCall); +} \ No newline at end of file diff --git a/libyul/optimiser/FunctionCallFinder.h b/libyul/optimiser/FunctionCallFinder.h new file mode 100644 index 000000000..365f86688 --- /dev/null +++ b/libyul/optimiser/FunctionCallFinder.h @@ -0,0 +1,47 @@ +/* + This file is part of solidity. 
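Note that in both backends the builtin compiles to (roughly) the identity on its argument; the call only exists to carry the memory promise to the optimiser. A sketch (literal invented):

    let ptr := memoryguard(0x80)
    // evaluates at run time exactly like
    let ptr2 := 0x80
    // but, unlike a plain literal, marks the value as the start of memory
    // the optimiser may hand out for its own purposes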
+ + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ +/** + * AST walker that finds all calls to a function of a given name. + */ + +#pragma once + +#include + +#include + +namespace solidity::yul +{ + +/** + * AST walker that finds all calls to a function of a given name. + * + * Prerequisite: Disambiguator + */ +class FunctionCallFinder: ASTModifier +{ +public: + static std::vector run(Block& _block, YulString _functionName); +private: + FunctionCallFinder(YulString _functionName); + using ASTModifier::operator(); + void operator()(FunctionCall& _functionCall) override; + YulString m_functionName; + std::vector m_calls; +}; + +} diff --git a/libyul/optimiser/StackCompressor.cpp b/libyul/optimiser/StackCompressor.cpp index d011c64b8..98850f03e 100644 --- a/libyul/optimiser/StackCompressor.cpp +++ b/libyul/optimiser/StackCompressor.cpp @@ -168,7 +168,7 @@ bool StackCompressor::run( bool allowMSizeOptimzation = !MSizeFinder::containsMSize(_dialect, *_object.code); for (size_t iterations = 0; iterations < _maxIterations; iterations++) { - map stackSurplus = CompilabilityChecker::run(_dialect, _object, _optimizeStackAllocation); + map stackSurplus = CompilabilityChecker(_dialect, _object, _optimizeStackAllocation).stackDeficit; if (stackSurplus.empty()) return true; diff --git a/libyul/optimiser/StackLimitEvader.cpp b/libyul/optimiser/StackLimitEvader.cpp new file mode 100644 index 000000000..1849a9074 --- /dev/null +++ b/libyul/optimiser/StackLimitEvader.cpp @@ -0,0 +1,142 @@ +/* + This file is part of solidity. + + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; +using namespace solidity; +using namespace solidity::yul; + +namespace +{ +// Walks the call graph using a Depth-First-Search assigning memory offsets to variables. +// - The leaves of the call graph will get the lowest offsets, increasing towards the root. +// - ``nextAvailableSlot`` maps a function to the next available slot that can be used by another +// function that calls it. +// - For each function starting from the root of the call graph: +// - Visit all children that are not already visited. +// - Determine the maximum value ``n`` of the values of ``nextAvailableSlot`` among the children. 
+// - If the function itself contains variables that need memory slots, but is contained in a cycle, +// abort the process as failure. +// - If not, assign each variable its slot starting from ``n`` (incrementing it). +// - Assign ``n`` to ``nextAvailableSlot`` of the function. +struct MemoryOffsetAllocator +{ + uint64_t run(YulString _function = YulString{}) + { + if (nextAvailableSlot.count(_function)) + return nextAvailableSlot[_function]; + + // Assign to zero early to guard against recursive calls. + nextAvailableSlot[_function] = 0; + + uint64_t nextSlot = 0; + if (callGraph.count(_function)) + for (YulString child: callGraph.at(_function)) + nextSlot = std::max(run(child), nextSlot); + + if (unreachableVariables.count(_function)) + { + yulAssert(!slotAllocations.count(_function), ""); + auto& assignedSlots = slotAllocations[_function]; + for (YulString variable: unreachableVariables.at(_function)) + if (variable.empty()) + { + // TODO: Too many function arguments or return parameters. + } + else + assignedSlots[variable] = nextSlot++; + } + + return nextAvailableSlot[_function] = nextSlot; + } + + map> const& unreachableVariables; + map> const& callGraph; + + map> slotAllocations{}; + map nextAvailableSlot{}; +}; + +u256 literalArgumentValue(FunctionCall const& _call) +{ + yulAssert(_call.arguments.size() == 1, ""); + Literal const* literal = std::get_if(&_call.arguments.front()); + yulAssert(literal && literal->kind == LiteralKind::Number, ""); + return valueOfLiteral(*literal); +} +} + +void StackLimitEvader::run( + OptimiserStepContext& _context, + Object& _object, + map> const& _unreachableVariables +) +{ + yulAssert(_object.code, ""); + auto const* evmDialect = dynamic_cast(&_context.dialect); + yulAssert( + evmDialect && evmDialect->providesObjectAccess(), + "StackLimitEvader can only be run on objects using the EVMDialect with object access." + ); + + vector memoryGuardCalls = FunctionCallFinder::run( + *_object.code, + "memoryguard"_yulstring + ); + // Do not optimise, if no ``memoryguard`` call is found. + if (memoryGuardCalls.empty()) + return; + + // Make sure all calls to ``memoryguard`` we found have the same value as argument (otherwise, abort). + u256 reservedMemory = literalArgumentValue(*memoryGuardCalls.front()); + for (FunctionCall const* getFreeMemoryStartCall: memoryGuardCalls) + if (reservedMemory != literalArgumentValue(*getFreeMemoryStartCall)) + return; + + CallGraph callGraph = CallGraphGenerator::callGraph(*_object.code); + + // We cannot move variables in recursive functions to fixed memory offsets. 
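As a sketch of the bail-out condition just checked (literals invented): the step runs only if every ``memoryguard`` call in the object carries the same literal, so it silently does nothing on code like the following; the recursion check below aborts in the same way.

    {
        let p := memoryguard(0x80)
        let q := memoryguard(0xc0)   // does not match 0x80: StackLimitEvader bails out
        sstore(p, q)
    }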
+ for (YulString function: callGraph.recursiveFunctions()) + if (_unreachableVariables.count(function)) + return; + + MemoryOffsetAllocator memoryOffsetAllocator{_unreachableVariables, callGraph.functionCalls}; + uint64_t requiredSlots = memoryOffsetAllocator.run(); + + StackToMemoryMover{_context, reservedMemory, memoryOffsetAllocator.slotAllocations}(*_object.code); + reservedMemory += 32 * requiredSlots; + YulString reservedMemoryString{util::toCompactHexWithPrefix(reservedMemory)}; + for (FunctionCall* memoryGuardCall: memoryGuardCalls) + { + Literal* literal = std::get_if(&memoryGuardCall->arguments.front()); + yulAssert(literal && literal->kind == LiteralKind::Number, ""); + literal->value = reservedMemoryString; + } +} diff --git a/libyul/optimiser/StackLimitEvader.h b/libyul/optimiser/StackLimitEvader.h new file mode 100644 index 000000000..4fc351f73 --- /dev/null +++ b/libyul/optimiser/StackLimitEvader.h @@ -0,0 +1,66 @@ +/* + This file is part of solidity. + + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ +/** + * Optimisation stage that assigns memory offsets to variables that would become unreachable if + * assigned a stack slot as usual and replaces references and assignments to them by mload and mstore calls. + */ + +#pragma once + +#include + +namespace solidity::yul +{ + +struct Object; + +/** + * Optimisation stage that assigns memory offsets to variables that would become unreachable if + * assigned a stack slot as usual. + * + * Uses CompilabilityChecker to determine which variables in which functions are unreachable. + * + * Only variables outside of functions contained in cycles in the call graph are considered. Thereby it is possible + * to assign globally fixed memory offsets to the variable. If a variable in a function contained in a cycle in the + * call graph is reported as unreachable, the process is aborted. + * + * Offsets are assigned to the variables, s.t. on every path through the call graph each variable gets a unique offset + * in memory. However, distinct paths through the call graph can use the same memory offsets for their variables. + * + * The current arguments to the ``memoryguard`` calls are used as base memory offset and then replaced by the offset past + * the last memory offset used for a variable on any path through the call graph. + * + * Finally, the StackToMemoryMover is called to actually move the variables to their offsets in memory. + * + * Prerequisite: Disambiguator + */ +class StackLimitEvader +{ +public: + /// @a _unreachableVariables can be determined by the CompilabilityChecker. + /// Can only be run on the EVM dialect with objects. + /// Abort and do nothing, if no ``memoryguard`` call or several ``memoryguard`` calls + /// with non-matching arguments are found, or if any of the @a _unreachableVariables + /// are contained in a recursive function. 
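A small worked example of the whole step (function and variable names invented; assume ``$a`` and ``$b`` were reported as unreachable, e.g. via the ``$`` convention used by the ``fakeStackLimitEvader`` tests further down): ``f`` and ``g`` are leaves on distinct call-graph paths, so both variables share slot 0 at address 0x80, one slot is needed in total, and the guard literal is raised by 32 bytes.

    {
        mstore(0x40, memoryguard(0x80))
        function f() -> r { let $a := 1  r := $a }
        function g() -> r { let $b := 2  r := $b }
        sstore(0, add(f(), g()))
    }

becomes, schematically:

    {
        mstore(0x40, memoryguard(0xa0))
        function f() -> r { mstore(0x80, 1)  r := mload(0x80) }
        function g() -> r { mstore(0x80, 2)  r := mload(0x80) }
        sstore(0, add(f(), g()))
    }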
+ static void run( + OptimiserStepContext& _context, + Object& _object, + std::map> const& _unreachableVariables + ); +}; + +} diff --git a/libyul/optimiser/StackToMemoryMover.cpp b/libyul/optimiser/StackToMemoryMover.cpp new file mode 100644 index 000000000..2c1cfa227 --- /dev/null +++ b/libyul/optimiser/StackToMemoryMover.cpp @@ -0,0 +1,210 @@ +/* + This file is part of solidity. + + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ +#include +#include +#include + +#include + +#include +#include + +using namespace std; +using namespace solidity; +using namespace solidity::yul; + +namespace +{ +void appendMemoryStore( + vector& _statements, + langutil::SourceLocation const& _loc, + YulString _mpos, + Expression _value +) +{ + _statements.emplace_back(ExpressionStatement{_loc, FunctionCall{ + _loc, + Identifier{_loc, "mstore"_yulstring}, + { + Literal{_loc, LiteralKind::Number, _mpos, {}}, + std::move(_value) + } + }}); +} +} + +StackToMemoryMover::StackToMemoryMover( + OptimiserStepContext& _context, + u256 _reservedMemory, + map> const& _memorySlots +): m_reservedMemory(std::move(_reservedMemory)), m_memorySlots(_memorySlots), m_nameDispenser(_context.dispenser) +{ + auto const* evmDialect = dynamic_cast(&_context.dialect); + yulAssert( + evmDialect && evmDialect->providesObjectAccess(), + "StackToMemoryMover can only be run on objects using the EVMDialect with object access." + ); + + if (m_memorySlots.count(YulString{})) + // If the global scope contains variables to be moved, start with those as if it were a function. + m_currentFunctionMemorySlots = &m_memorySlots.at(YulString{}); +} + +void StackToMemoryMover::operator()(FunctionDefinition& _functionDefinition) +{ + map const* saved = m_currentFunctionMemorySlots; + if (m_memorySlots.count(_functionDefinition.name)) + { + m_currentFunctionMemorySlots = &m_memorySlots.at(_functionDefinition.name); + for (TypedName const& param: _functionDefinition.parameters + _functionDefinition.returnVariables) + if (m_currentFunctionMemorySlots->count(param.name)) + { + // TODO: we cannot handle function parameters yet. + m_currentFunctionMemorySlots = nullptr; + break; + } + } + else + m_currentFunctionMemorySlots = nullptr; + ASTModifier::operator()(_functionDefinition); + m_currentFunctionMemorySlots = saved; +} + +void StackToMemoryMover::operator()(Block& _block) +{ + using OptionalStatements = std::optional>; + if (!m_currentFunctionMemorySlots) + { + ASTModifier::operator()(_block); + return; + } + auto containsVariableNeedingEscalation = [&](auto const& _variables) { + return util::contains_if(_variables, [&](auto const& var) { + return m_currentFunctionMemorySlots->count(var.name); + }); + }; + auto rewriteAssignmentOrVariableDeclaration = [&]( + langutil::SourceLocation const& _loc, + auto const& _variables, + std::unique_ptr _value + ) -> std::vector { + if (_variables.size() == 1) + { + std::vector result; + appendMemoryStore( + result, + _loc, + memoryOffset(_variables.front().name), + _value ? 
*std::move(_value) : Literal{_loc, LiteralKind::Number, "0"_yulstring, {}} + ); + return result; + } + + VariableDeclaration tempDecl{_loc, {}, std::move(_value)}; + vector memoryAssignments; + vector variableAssignments; + for (auto& var: _variables) + { + YulString tempVarName = m_nameDispenser.newName(var.name); + tempDecl.variables.emplace_back(TypedName{var.location, tempVarName, {}}); + + if (m_currentFunctionMemorySlots->count(var.name)) + appendMemoryStore(memoryAssignments, _loc, memoryOffset(var.name), Identifier{_loc, tempVarName}); + else if constexpr (std::is_same_v, Identifier>) + variableAssignments.emplace_back(Assignment{ + _loc, { Identifier{var.location, var.name} }, + make_unique(Identifier{_loc, tempVarName}) + }); + else + variableAssignments.emplace_back(VariableDeclaration{ + _loc, {std::move(var)}, + make_unique(Identifier{_loc, tempVarName}) + }); + } + std::vector result; + result.emplace_back(std::move(tempDecl)); + std::reverse(memoryAssignments.begin(), memoryAssignments.end()); + result += std::move(memoryAssignments); + std::reverse(variableAssignments.begin(), variableAssignments.end()); + result += std::move(variableAssignments); + return result; + }; + + util::iterateReplacing( + _block.statements, + [&](Statement& _statement) + { + auto defaultVisit = [&]() { ASTModifier::visit(_statement); return OptionalStatements{}; }; + return std::visit(util::GenericVisitor{ + [&](Assignment& _assignment) -> OptionalStatements + { + if (!containsVariableNeedingEscalation(_assignment.variableNames)) + return defaultVisit(); + visit(*_assignment.value); + return {rewriteAssignmentOrVariableDeclaration( + _assignment.location, + _assignment.variableNames, + std::move(_assignment.value) + )}; + }, + [&](VariableDeclaration& _varDecl) -> OptionalStatements + { + if (!containsVariableNeedingEscalation(_varDecl.variables)) + return defaultVisit(); + if (_varDecl.value) + visit(*_varDecl.value); + return {rewriteAssignmentOrVariableDeclaration( + _varDecl.location, + _varDecl.variables, + std::move(_varDecl.value) + )}; + }, + [&](auto&) { return defaultVisit(); } + }, _statement); + }); +} + +void StackToMemoryMover::visit(Expression& _expression) +{ + if ( + Identifier* identifier = std::get_if(&_expression); + identifier && m_currentFunctionMemorySlots && m_currentFunctionMemorySlots->count(identifier->name) + ) + { + langutil::SourceLocation loc = identifier->location; + _expression = FunctionCall { + loc, + Identifier{loc, "mload"_yulstring}, { + Literal { + loc, + LiteralKind::Number, + memoryOffset(identifier->name), + {} + } + } + }; + } + else + ASTModifier::visit(_expression); +} + +YulString StackToMemoryMover::memoryOffset(YulString _variable) +{ + yulAssert(m_currentFunctionMemorySlots, ""); + return YulString{util::toCompactHexWithPrefix(m_reservedMemory + 32 * m_currentFunctionMemorySlots->at(_variable))}; +} + diff --git a/libyul/optimiser/StackToMemoryMover.h b/libyul/optimiser/StackToMemoryMover.h new file mode 100644 index 000000000..d4ef2f737 --- /dev/null +++ b/libyul/optimiser/StackToMemoryMover.h @@ -0,0 +1,100 @@ +/* + This file is part of solidity. + + solidity is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. 
+ + solidity is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with solidity. If not, see . +*/ +/** + * Optimisation stage that moves Yul variables from stack to memory. + */ + +#pragma once + +#include +#include +#include + +namespace solidity::yul +{ + +/** + * Optimisation stage that moves Yul variables from stack to memory. + * It takes a map from functions names and variable names to memory offsets. + * It then transforms the AST as follows: + * + * Single variable declarations are replaced by mstore's as follows: + * If a is in the map, replace + * let a + * by + * mstore(, 0) + * respectively, replace + * let a := expr + * by + * mstore(, expr) + * + * In a multi-variable declaration, variables to be moved are replaced by fresh variables and then moved to memory: + * If b and d are in the map, replace + * let a, b, c, d := f() + * by + * let _1, _2, _3, _4 := f() + * mstore(, _4) + * mstore(, _2) + * let c := _3 + * let a := _1 + * + * Assignments to single variables are replaced by mstore's: + * If a is in the map, replace + * a := expr + * by + * mstore(, expr) + * + * Assignments to multiple variables are split up similarly to multi-variable declarations: + * If b and d are in the map, replace + * a, b, c, d := f() + * by + * let _1, _2, _3, _4 := f() + * mstore(, _4) + * mstore(, _2) + * c := _3 + * a := _1 + * + * Replace all references to a variable ``a`` in the map by ``mload()``. + * + * If a visited function has arguments or return parameters that are contained in the map, + * the entire function is skipped (no local variables in the function will be moved at all). + * + * Prerequisite: Disambiguator, ForLoopInitRewriter. 
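To make the multi-value rule above concrete (a sketch; ``_1``..``_4`` stand for whatever fresh names the NameDispenser produces): if ``b`` and ``d`` were assigned the offsets ``0x80`` and ``0xa0``, then

    let a, b, c, d := f()

becomes, schematically,

    let _1, _2, _3, _4 := f()
    mstore(0xa0, _4)
    mstore(0x80, _2)
    let c := _3
    let a := _1

and any later read of ``b`` or ``d`` is replaced by ``mload(0x80)`` or ``mload(0xa0)``.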
+ */ +class StackToMemoryMover: ASTModifier +{ +public: + StackToMemoryMover( + OptimiserStepContext& _context, + u256 _reservedMemory, + std::map> const& _memoryOffsets + ); + + using ASTModifier::operator(); + + void operator()(FunctionDefinition& _functionDefinition) override; + void operator()(Block& _block) override; + void visit(Expression& _expression) override; +private: + YulString memoryOffset(YulString _variable); + u256 m_reservedMemory; + std::map> const& m_memorySlots; + NameDispenser& m_nameDispenser; + std::map const* m_currentFunctionMemorySlots = nullptr; +}; + +} \ No newline at end of file diff --git a/libyul/optimiser/Suite.cpp b/libyul/optimiser/Suite.cpp index 98683b453..11249c23d 100644 --- a/libyul/optimiser/Suite.cpp +++ b/libyul/optimiser/Suite.cpp @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -73,6 +74,7 @@ #include #include +#include using namespace std; using namespace solidity; @@ -124,6 +126,12 @@ void OptimiserSuite::run( { yulAssert(_meter, ""); ConstantOptimiser{*dialect, *_meter}(ast); + if (dialect->providesObjectAccess()) + StackLimitEvader::run(suite.m_context, _object, CompilabilityChecker{ + _dialect, + _object, + _optimizeStackAllocation + }.unreachableVariables); } else if (dynamic_cast(&_dialect)) { diff --git a/test/cmdlineTests/ir_compiler_inheritance_nosubobjects/output b/test/cmdlineTests/ir_compiler_inheritance_nosubobjects/output index fd372367a..6af2128e1 100644 --- a/test/cmdlineTests/ir_compiler_inheritance_nosubobjects/output +++ b/test/cmdlineTests/ir_compiler_inheritance_nosubobjects/output @@ -9,7 +9,7 @@ Optimized IR: object "C_6" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("C_6_deployed") codecopy(0, dataoffset("C_6_deployed"), _1) @@ -19,7 +19,7 @@ object "C_6" { object "C_6_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) revert(0, 0) } } @@ -37,7 +37,7 @@ Optimized IR: object "D_9" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("D_9_deployed") codecopy(0, dataoffset("D_9_deployed"), _1) @@ -47,7 +47,7 @@ object "D_9" { object "D_9_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) revert(0, 0) } } diff --git a/test/cmdlineTests/ir_compiler_subobjects/output b/test/cmdlineTests/ir_compiler_subobjects/output index 6b5f8e677..012c19828 100644 --- a/test/cmdlineTests/ir_compiler_subobjects/output +++ b/test/cmdlineTests/ir_compiler_subobjects/output @@ -9,7 +9,7 @@ Optimized IR: object "C_2" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("C_2_deployed") codecopy(0, dataoffset("C_2_deployed"), _1) @@ -19,7 +19,7 @@ object "C_2" { object "C_2_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) revert(0, 0) } } @@ -37,7 +37,7 @@ Optimized IR: object "D_13" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("D_13_deployed") codecopy(0, dataoffset("D_13_deployed"), _1) @@ -47,20 +47,21 @@ object "D_13" { object "D_13_deployed" { code { { - mstore(64, 128) + let _1 := memoryguard(0x80) + mstore(64, _1) if iszero(lt(calldatasize(), 4)) { - let _1 := 0 - if eq(0x26121ff0, shr(224, calldataload(_1))) + let _2 := 0 + if eq(0x26121ff0, shr(224, calldataload(_2))) { - if callvalue() { revert(_1, _1) } - if slt(add(calldatasize(), not(3)), _1) { revert(_1, _1) } - let _2 := datasize("C_2") - 
let _3 := add(128, _2) - if or(gt(_3, 0xffffffffffffffff), lt(_3, 128)) { revert(_1, _1) } - datacopy(128, dataoffset("C_2"), _2) - pop(create(_1, 128, _2)) - return(allocateMemory(_1), _1) + if callvalue() { revert(_2, _2) } + if slt(add(calldatasize(), not(3)), _2) { revert(_2, _2) } + let _3 := datasize("C_2") + let _4 := add(_1, _3) + if or(gt(_4, 0xffffffffffffffff), lt(_4, _1)) { revert(_2, _2) } + datacopy(_1, dataoffset("C_2"), _3) + pop(create(_2, _1, sub(_4, _1))) + return(allocateMemory(_2), _2) } } revert(0, 0) @@ -76,7 +77,7 @@ object "D_13" { object "C_2" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("C_2_deployed") codecopy(0, dataoffset("C_2_deployed"), _1) @@ -86,7 +87,7 @@ object "D_13" { object "C_2_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) revert(0, 0) } } diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/args b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/args new file mode 100644 index 000000000..cae21e720 --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/args @@ -0,0 +1 @@ +--ir-optimized --optimize \ No newline at end of file diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/input.sol b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/input.sol new file mode 100644 index 000000000..6dd033f13 --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/input.sol @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity >=0.0.0; + +contract D { + constructor() { assembly {}} + function f() public pure {} +} diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/output b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/output new file mode 100644 index 000000000..5314caa65 --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_creation/output @@ -0,0 +1,40 @@ +Optimized IR: +/******************************************************* + * WARNING * + * Solidity to Yul compilation is still EXPERIMENTAL * + * It can result in LOSS OF FUNDS or worse * + * !USE AT YOUR OWN RISK! 
* + *******************************************************/ + +object "D_11" { + code { + { + mstore(64, 128) + if callvalue() { revert(0, 0) } + let _1 := datasize("D_11_deployed") + codecopy(0, dataoffset("D_11_deployed"), _1) + return(0, _1) + } + } + object "D_11_deployed" { + code { + { + let _1 := memoryguard(0x80) + mstore(64, _1) + if iszero(lt(calldatasize(), 4)) + { + let _2 := 0 + if eq(0x26121ff0, shr(224, calldataload(_2))) + { + if callvalue() { revert(_2, _2) } + if slt(add(calldatasize(), not(3)), _2) { revert(_2, _2) } + if gt(_1, 0xffffffffffffffff) { revert(_2, _2) } + mstore(64, _1) + return(_1, _2) + } + } + revert(0, 0) + } + } + } +} diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/args b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/args new file mode 100644 index 000000000..cae21e720 --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/args @@ -0,0 +1 @@ +--ir-optimized --optimize \ No newline at end of file diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/input.sol b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/input.sol new file mode 100644 index 000000000..caef2b75e --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/input.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity >=0.0.0; + +contract D { + function f() public pure { + assembly {} + } +} diff --git a/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/output b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/output new file mode 100644 index 000000000..25d7edfb2 --- /dev/null +++ b/test/cmdlineTests/ir_with_assembly_no_memoryguard_runtime/output @@ -0,0 +1,38 @@ +Optimized IR: +/******************************************************* + * WARNING * + * Solidity to Yul compilation is still EXPERIMENTAL * + * It can result in LOSS OF FUNDS or worse * + * !USE AT YOUR OWN RISK! 
* + *******************************************************/ + +object "D_7" { + code { + { + mstore(64, memoryguard(0x80)) + if callvalue() { revert(0, 0) } + let _1 := datasize("D_7_deployed") + codecopy(0, dataoffset("D_7_deployed"), _1) + return(0, _1) + } + } + object "D_7_deployed" { + code { + { + mstore(64, 128) + if iszero(lt(calldatasize(), 4)) + { + let _1 := 0 + if eq(0x26121ff0, shr(224, calldataload(_1))) + { + if callvalue() { revert(_1, _1) } + if slt(add(calldatasize(), not(3)), _1) { revert(_1, _1) } + mstore(64, 128) + return(128, _1) + } + } + revert(0, 0) + } + } + } +} diff --git a/test/cmdlineTests/name_simplifier/output b/test/cmdlineTests/name_simplifier/output index f2ec8e0cc..085729802 100644 --- a/test/cmdlineTests/name_simplifier/output +++ b/test/cmdlineTests/name_simplifier/output @@ -9,7 +9,7 @@ Optimized IR: object "C_56" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("C_56_deployed") codecopy(0, dataoffset("C_56_deployed"), _1) @@ -19,7 +19,7 @@ object "C_56" { object "C_56_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if iszero(lt(calldatasize(), 4)) { let _1 := 0 diff --git a/test/cmdlineTests/optimizer_array_sload/output b/test/cmdlineTests/optimizer_array_sload/output index 31c17a0a5..515738473 100644 --- a/test/cmdlineTests/optimizer_array_sload/output +++ b/test/cmdlineTests/optimizer_array_sload/output @@ -9,7 +9,7 @@ Optimized IR: object "Arraysum_33" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } let _1 := datasize("Arraysum_33_deployed") codecopy(0, dataoffset("Arraysum_33_deployed"), _1) @@ -19,7 +19,7 @@ object "Arraysum_33" { object "Arraysum_33_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if iszero(lt(calldatasize(), 4)) { let _1 := 0 diff --git a/test/cmdlineTests/standard_irOptimized_requested/output.json b/test/cmdlineTests/standard_irOptimized_requested/output.json index ba3cb8b2a..09e7730f5 100644 --- a/test/cmdlineTests/standard_irOptimized_requested/output.json +++ b/test/cmdlineTests/standard_irOptimized_requested/output.json @@ -7,7 +7,7 @@ object \"C_6\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_6() codecopy(0, dataoffset(\"C_6_deployed\"), datasize(\"C_6_deployed\")) @@ -17,7 +17,7 @@ object \"C_6\" { } object \"C_6_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { let selector := shift_right_224_unsigned(calldataload(0)) diff --git a/test/cmdlineTests/standard_ir_requested/output.json b/test/cmdlineTests/standard_ir_requested/output.json index f63075090..cae921a47 100644 --- a/test/cmdlineTests/standard_ir_requested/output.json +++ b/test/cmdlineTests/standard_ir_requested/output.json @@ -8,7 +8,7 @@ object \"C_6\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_6() @@ -24,7 +24,7 @@ object \"C_6\" { } object \"C_6_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/cmdlineTests/yul_optimizer_steps/output b/test/cmdlineTests/yul_optimizer_steps/output index 847c731e9..b37f0b44e 100644 --- a/test/cmdlineTests/yul_optimizer_steps/output +++ b/test/cmdlineTests/yul_optimizer_steps/output @@ -9,7 +9,7 @@ Optimized IR: object "C_6" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if callvalue() { revert(0, 0) } codecopy(0, 
dataoffset("C_6_deployed"), datasize("C_6_deployed")) return(0, datasize("C_6_deployed")) @@ -18,7 +18,7 @@ object "C_6" { object "C_6_deployed" { code { { - mstore(64, 128) + mstore(64, memoryguard(0x80)) if iszero(lt(calldatasize(), 4)) { let selector := shift_right_224_unsigned(calldataload(0)) diff --git a/test/cmdlineTests/yul_string_format_ascii/output.json b/test/cmdlineTests/yul_string_format_ascii/output.json index 0609704c5..ad1c92433 100644 --- a/test/cmdlineTests/yul_string_format_ascii/output.json +++ b/test/cmdlineTests/yul_string_format_ascii/output.json @@ -8,7 +8,7 @@ object \"C_10\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_10() @@ -24,7 +24,7 @@ object \"C_10\" { } object \"C_10_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/cmdlineTests/yul_string_format_ascii_bytes32/output.json b/test/cmdlineTests/yul_string_format_ascii_bytes32/output.json index 1bc00b338..d1907f22c 100644 --- a/test/cmdlineTests/yul_string_format_ascii_bytes32/output.json +++ b/test/cmdlineTests/yul_string_format_ascii_bytes32/output.json @@ -8,7 +8,7 @@ object \"C_10\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_10() @@ -24,7 +24,7 @@ object \"C_10\" { } object \"C_10_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/cmdlineTests/yul_string_format_ascii_bytes32_from_number/output.json b/test/cmdlineTests/yul_string_format_ascii_bytes32_from_number/output.json index fdef77781..0f981bf16 100644 --- a/test/cmdlineTests/yul_string_format_ascii_bytes32_from_number/output.json +++ b/test/cmdlineTests/yul_string_format_ascii_bytes32_from_number/output.json @@ -8,7 +8,7 @@ object \"C_10\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_10() @@ -24,7 +24,7 @@ object \"C_10\" { } object \"C_10_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/cmdlineTests/yul_string_format_ascii_long/output.json b/test/cmdlineTests/yul_string_format_ascii_long/output.json index 2a845796d..ecbed13d3 100644 --- a/test/cmdlineTests/yul_string_format_ascii_long/output.json +++ b/test/cmdlineTests/yul_string_format_ascii_long/output.json @@ -8,7 +8,7 @@ object \"C_10\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_10() @@ -24,7 +24,7 @@ object \"C_10\" { } object \"C_10_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/cmdlineTests/yul_string_format_hex/output.json b/test/cmdlineTests/yul_string_format_hex/output.json index 784baf2f4..86a658440 100644 --- a/test/cmdlineTests/yul_string_format_hex/output.json +++ b/test/cmdlineTests/yul_string_format_hex/output.json @@ -8,7 +8,7 @@ object \"C_10\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if callvalue() { revert(0, 0) } constructor_C_10() @@ -24,7 +24,7 @@ object \"C_10\" { } object \"C_10_deployed\" { code { - mstore(64, 128) + mstore(64, memoryguard(128)) if iszero(lt(calldatasize(), 4)) { diff --git a/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/inlined.sol b/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/inlined.sol new file mode 100644 index 000000000..b9c050b46 --- /dev/null +++ 
b/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/inlined.sol @@ -0,0 +1,53 @@ +contract C { + uint256[1024] s; + function f() public returns (uint256 x) { + x = 42; + uint256 x0 = s[0]; + uint256 x1 = s[1]; + uint256 x2 = s[2]; + uint256 x3 = s[3]; + uint256 x4 = s[4]; + uint256 x5 = s[5]; + uint256 x6 = s[6]; + uint256 x7 = s[7]; + uint256 x8 = s[8]; + uint256 x9 = s[9]; + uint256 x10 = s[10]; + uint256 x11 = s[11]; + uint256 x12 = s[12]; + uint256 x13 = s[13]; + uint256 x14 = s[14]; + uint256 x15 = s[15]; + uint256 x16 = s[16]; + uint256 x17 = s[17]; + uint256 x18 = s[18]; + s[1000] = x0 + 2; + s[118] = x18; + s[117] = x17; + s[116] = x16; + s[115] = x15; + s[114] = x14; + s[113] = x13; + s[112] = x12; + s[111] = x11; + s[110] = x10; + s[109] = x9; + s[108] = x8; + s[107] = x7; + s[106] = x6; + s[105] = x5; + s[104] = x4; + s[103] = x3; + s[102] = x2; + s[101] = x1; + s[100] = x0; + } + function test() public view returns(uint256) { + return s[1000]; + } +} +// ==== +// compileViaYul: true +// ---- +// f() -> 0x2a +// test() -> 2 diff --git a/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/non_inlined.sol b/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/non_inlined.sol new file mode 100644 index 000000000..9fa3f7117 --- /dev/null +++ b/test/libsolidity/semanticTests/viaYul/stackLimitEvasion/non_inlined.sol @@ -0,0 +1,57 @@ +contract C { + uint256[1024] s; + function g() public returns (uint256) { + // try to prevent inlining + return f() + f() + f() + f() + f(); + } + function f() public returns (uint256 x) { + x = 42; + uint256 x0 = s[0]; + uint256 x1 = s[1]; + uint256 x2 = s[2]; + uint256 x3 = s[3]; + uint256 x4 = s[4]; + uint256 x5 = s[5]; + uint256 x6 = s[6]; + uint256 x7 = s[7]; + uint256 x8 = s[8]; + uint256 x9 = s[9]; + uint256 x10 = s[10]; + uint256 x11 = s[11]; + uint256 x12 = s[12]; + uint256 x13 = s[13]; + uint256 x14 = s[14]; + uint256 x15 = s[15]; + uint256 x16 = s[16]; + uint256 x17 = s[17]; + uint256 x18 = s[18]; + s[1000] = x0 + 2; + s[118] = x18; + s[117] = x17; + s[116] = x16; + s[115] = x15; + s[114] = x14; + s[113] = x13; + s[112] = x12; + s[111] = x11; + s[110] = x10; + s[109] = x9; + s[108] = x8; + s[107] = x7; + s[106] = x6; + s[105] = x5; + s[104] = x4; + s[103] = x3; + s[102] = x2; + s[101] = x1; + s[100] = x0; + } + function test() public view returns(uint256) { + return s[1000]; + } +} +// ==== +// compileViaYul: true +// ---- +// f() -> 0x2a +// test() -> 2 diff --git a/test/libyul/CompilabilityChecker.cpp b/test/libyul/CompilabilityChecker.cpp index ad1a6fd5e..e8890a8bb 100644 --- a/test/libyul/CompilabilityChecker.cpp +++ b/test/libyul/CompilabilityChecker.cpp @@ -39,7 +39,7 @@ string check(string const& _input) Object obj; std::tie(obj.code, obj.analysisInfo) = yul::test::parse(_input, false); BOOST_REQUIRE(obj.code); - map functions = CompilabilityChecker::run(EVMDialect::strictAssemblyForEVM(solidity::test::CommonOptions::get().evmVersion()), obj, true); + auto functions = CompilabilityChecker(EVMDialect::strictAssemblyForEVM(solidity::test::CommonOptions::get().evmVersion()), obj, true).stackDeficit; string out; for (auto const& function: functions) out += function.first.str() + ": " + to_string(function.second) + " "; diff --git a/test/libyul/YulOptimizerTest.cpp b/test/libyul/YulOptimizerTest.cpp index fc5177fe4..e61b5d2d8 100644 --- a/test/libyul/YulOptimizerTest.cpp +++ b/test/libyul/YulOptimizerTest.cpp @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,7 @@ #include #include 
#include +#include #include #include #include @@ -70,6 +72,7 @@ #include #include #include +#include #include #include @@ -378,6 +381,58 @@ TestCase::TestResult YulOptimizerTest::run(ostream& _stream, string const& _line obj.analysisInfo = m_analysisInfo; OptimiserSuite::run(*m_dialect, &meter, obj, true, solidity::frontend::OptimiserSettings::DefaultYulOptimiserSteps); } + else if (m_optimizerStep == "stackLimitEvader") + { + yul::Object obj; + obj.code = m_object->code; + obj.analysisInfo = m_analysisInfo; + disambiguate(); + StackLimitEvader::run(*m_context, obj, CompilabilityChecker{ + *m_dialect, + obj, + true + }.unreachableVariables); + } + else if (m_optimizerStep == "fakeStackLimitEvader") + { + yul::Object obj; + obj.code = m_object->code; + obj.analysisInfo = m_analysisInfo; + disambiguate(); + // Mark all variables with a name starting with "$" for escalation to memory. + struct FakeUnreachableGenerator: ASTWalker + { + map> fakeUnreachables; + using ASTWalker::operator(); + void operator()(FunctionDefinition const& _function) override + { + YulString originalFunctionName = m_currentFunction; + m_currentFunction = _function.name; + ASTWalker::operator()(_function); + m_currentFunction = originalFunctionName; + } + void visitVariableName(YulString _var) + { + if (!_var.empty() && _var.str().front() == '$') + fakeUnreachables[m_currentFunction].insert(_var); + } + void operator()(VariableDeclaration const& _varDecl) override + { + for (auto const& var: _varDecl.variables) + visitVariableName(var.name); + ASTWalker::operator()(_varDecl); + } + void operator()(Identifier const& _identifier) override + { + visitVariableName(_identifier.name); + ASTWalker::operator()(_identifier); + } + YulString m_currentFunction = YulString{}; + }; + FakeUnreachableGenerator fakeUnreachableGenerator; + fakeUnreachableGenerator(*obj.code); + StackLimitEvader::run(*m_context, obj, fakeUnreachableGenerator.fakeUnreachables); + } else { AnsiColorized(_stream, _formatted, {formatting::BOLD, formatting::RED}) << _linePrefix << "Invalid optimizer step: " << m_optimizerStep << endl; diff --git a/test/libyul/ewasmTranslationTests/memoryguard.yul b/test/libyul/ewasmTranslationTests/memoryguard.yul new file mode 100644 index 000000000..5aafc88d2 --- /dev/null +++ b/test/libyul/ewasmTranslationTests/memoryguard.yul @@ -0,0 +1,12 @@ +{ + mstore(0x40, memoryguard(0x0102030405060708)) + sstore(1, mload(0x40)) +} +// ---- +// Trace: +// Memory dump: +// 0: 0000000000000000000000000000000000000000000000000000000000000001 +// 20: 0000000000000000000000000000000000000000000000000102030405060708 +// 80: 0000000000000000000000000000000000000000000000000102030405060708 +// Storage dump: +// 0000000000000000000000000000000000000000000000000000000000000001: 0000000000000000000000000000000000000000000000000102030405060708 diff --git a/test/libyul/yulOptimizerTests/fakeStackLimitEvader/connected.yul b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/connected.yul new file mode 100644 index 000000000..075da88cd --- /dev/null +++ b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/connected.yul @@ -0,0 +1,62 @@ +{ + mstore(0x40, memoryguard(0)) + function g() -> a, b { + a := 21 + let $c := 1 + b,a,$c := z() + } + function f() -> x { + let $x2 + $x2 := 42 + let $x3, $x4 := g() + x := mul(add($x2, $x3), h($x4)) + sstore($x3, $x4) + } + function h(v) -> a { + let x, $z, y := z() + a, $z, v := z() + } + function z() -> a,b,c { let $x := 0 } + sstore(0, f()) + let x, y := g() +} +// ---- +// step: fakeStackLimitEvader +// +// 
{ +// mstore(0x40, memoryguard(0xa0)) +// function g() -> a, b +// { +// a := 21 +// mstore(0x20, 1) +// let b_1, a_2, $c_3 := z() +// mstore(0x20, $c_3) +// a := a_2 +// b := b_1 +// } +// function f() -> x +// { +// mstore(0x60, 0) +// mstore(0x60, 42) +// let $x3_4, $x4_5 := g() +// mstore(0x80, $x4_5) +// mstore(0x40, $x3_4) +// x := mul(add(mload(0x60), mload(0x40)), h(mload(0x80))) +// sstore(mload(0x40), mload(0x80)) +// } +// function h(v) -> a_1 +// { +// let x_2_6, $z_7, y_8 := z() +// mstore(0x20, $z_7) +// let y := y_8 +// let x_2 := x_2_6 +// let a_1_9, $z_10, v_11 := z() +// mstore(0x20, $z_10) +// v := v_11 +// a_1 := a_1_9 +// } +// function z() -> a_3, b_4, c +// { mstore(0x00, 0) } +// sstore(0, f()) +// let x_5, y_6 := g() +// } diff --git a/test/libyul/yulOptimizerTests/fakeStackLimitEvader/function_arg.yul b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/function_arg.yul new file mode 100644 index 000000000..033e0b3c0 --- /dev/null +++ b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/function_arg.yul @@ -0,0 +1,20 @@ +{ + mstore(0x40, memoryguard(0)) + let $x := 0 + sstore(0, $x) + function h($hx) -> y { + y := $hx + } + sstore(1, h(32)) +} +// ---- +// step: fakeStackLimitEvader +// +// { +// mstore(0x40, memoryguard(0x40)) +// mstore(0x20, 0) +// sstore(0, mload(0x20)) +// function h($hx) -> y +// { y := $hx } +// sstore(1, h(32)) +// } diff --git a/test/libyul/yulOptimizerTests/fakeStackLimitEvader/outer_block.yul b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/outer_block.yul new file mode 100644 index 000000000..023100cdd --- /dev/null +++ b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/outer_block.yul @@ -0,0 +1,13 @@ +{ + mstore(0x40, memoryguard(0x80)) + let $x := 42 + sstore(42, $x) +} +// ---- +// step: fakeStackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// mstore(0x80, 42) +// sstore(42, mload(0x80)) +// } diff --git a/test/libyul/yulOptimizerTests/fakeStackLimitEvader/stub.yul b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/stub.yul new file mode 100644 index 000000000..9c792e63f --- /dev/null +++ b/test/libyul/yulOptimizerTests/fakeStackLimitEvader/stub.yul @@ -0,0 +1,88 @@ +{ + mstore(0x40, memoryguard(0)) + function f() { + let $fx + let $fy := 42 + sstore($fx, $fy) + $fx := 21 + } + function g(gx) { + let $gx, $gy := tuple2() + { $gx, $gy := tuple2() } + { $gx, gx := tuple2() } + { gx, $gy := tuple2() } + } + function h(hx, hy, hz, hw) { + let $hx, $hy, $hz, $hw := tuple4() + { hx, $hy, hz, $hw := tuple4() } + { $hx, $hy, hz, hw := tuple4() } + } + function tuple2() -> a, b {} + function tuple4() -> a, b, c, d {} + f() + g(0) + h(1, 2, 3, 4) +} +// ---- +// step: fakeStackLimitEvader +// +// { +// mstore(0x40, memoryguard(0x80)) +// function f() +// { +// mstore(0x20, 0) +// mstore(0x00, 42) +// sstore(mload(0x20), mload(0x00)) +// mstore(0x20, 21) +// } +// function g(gx) +// { +// let $gx_1, $gy_2 := tuple2() +// mstore(0x20, $gy_2) +// mstore(0x00, $gx_1) +// { +// let $gx_3, $gy_4 := tuple2() +// mstore(0x20, $gy_4) +// mstore(0x00, $gx_3) +// } +// { +// let $gx_5, gx_6 := tuple2() +// mstore(0x00, $gx_5) +// gx := gx_6 +// } +// { +// let gx_7, $gy_8 := tuple2() +// mstore(0x20, $gy_8) +// gx := gx_7 +// } +// } +// function h(hx, hy, hz, hw) +// { +// let $hx_9, $hy_10, $hz_11, $hw_12 := tuple4() +// mstore(0x60, $hw_12) +// mstore(0x00, $hz_11) +// mstore(0x20, $hy_10) +// mstore(0x40, $hx_9) +// { +// let hx_13, $hy_14, hz_15, $hw_16 := tuple4() +// mstore(0x60, $hw_16) +// mstore(0x20, $hy_14) +// hz := 
hz_15 +// hx := hx_13 +// } +// { +// let $hx_17, $hy_18, hz_19, hw_20 := tuple4() +// mstore(0x20, $hy_18) +// mstore(0x40, $hx_17) +// hw := hw_20 +// hz := hz_19 +// } +// } +// function tuple2() -> a, b +// { } +// function tuple4() -> a_1, b_2, c, d +// { } +// f() +// g(0) +// h(1, 2, 3, 4) +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle.yul new file mode 100644 index 000000000..37da7f9b8 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle.yul @@ -0,0 +1,95 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, g(sload(3))) + function g(x) -> v { + v := f() + } + function f() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + sstore(23, g(sload(42))) + } +} +// ---- +// step: stackLimitEvader +// +// { +// mstore(0x40, memoryguard(128)) +// sstore(0, g(sload(3))) +// function g(x) -> v +// { v := f() } +// function f() -> v_1 +// { +// let a1 := calldataload(mul(1, 4)) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// a1 := calldataload(mul(0, 4)) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, a1) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), a1) +// sstore(23, g(sload(42))) +// } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after.yul new file mode 100644 index 000000000..4480d012e --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after.yul @@ -0,0 +1,95 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, f()) + function f() -> v { + 
let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + sstore(23, h()) + } + function h() -> v { + v := h() + } +} +// ---- +// step: stackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, f()) +// function f() -> v +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// sstore(23, h()) +// } +// function h() -> v_1 +// { v_1 := h() } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after_2.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after_2.yul new file mode 100644 index 000000000..4468165f1 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_after_2.yul @@ -0,0 +1,100 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, f()) + function f() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + 
sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + sstore(23, h()) + } + function h() -> v { + v := i() + } + function i() -> v { + v := h() + } +} +// ---- +// step: stackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, f()) +// function f() -> v +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// sstore(23, h()) +// } +// function h() -> v_1 +// { v_1 := i() } +// function i() -> v_2 +// { v_2 := h() } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before.yul new file mode 100644 index 000000000..e6819ea90 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before.yul @@ -0,0 +1,103 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, g(sload(3))) + function g(x) -> v { + switch lt(x, 3) + case 0 { + v := f() + } + case 1 { + v := g(sub(x,1)) + } + } + function f() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + } +} +// ---- +// step: 
stackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, g(sload(3))) +// function g(x) -> v +// { +// switch lt(x, 3) +// case 0 { v := f() } +// case 1 { v := g(sub(x, 1)) } +// } +// function f() -> v_1 +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_2.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_2.yul new file mode 100644 index 000000000..a0259b180 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_2.yul @@ -0,0 +1,108 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, g(sload(3))) + function g(x) -> v { + switch lt(x, 3) + case 0 { + v := h(x) + } + case 1 { + v := g(sub(x,f())) + } + } + function h(x) -> v { + v := g(x) + } + function f() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + } +} +// ---- +// step: stackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, g(sload(3))) +// function g(x) -> v +// { +// switch lt(x, 3) +// case 0 { v := h(x) } +// case 1 { v := g(sub(x, f())) } +// } +// function h(x_1) -> v_2 +// { v_2 := g(x_1) } +// function f() -> v_3 +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 
:= calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_after.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_after.yul new file mode 100644 index 000000000..f5c068d5e --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/cycle_before_after.yul @@ -0,0 +1,110 @@ +{ + mstore(0x40, memoryguard(128)) + sstore(0, g(sload(0))) + function g(x) -> v { + switch lt(x, 3) + case 0 { + v := f() + } + case 1 { + v := g(sub(x,1)) + } + } + function f() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + sstore(23, h()) + } + function h() -> v { + v := h() + } +} +// ---- +// step: stackLimitEvader +// +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, g(sload(0))) +// function g(x) -> v +// { +// switch lt(x, 3) +// case 0 { v := f() } +// case 1 { v := g(sub(x, 1)) } +// } +// function f() -> v_1 +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) 
+// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// sstore(23, h()) +// } +// function h() -> v_2 +// { v_2 := h() } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/function_arg.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/function_arg.yul new file mode 100644 index 000000000..7bc4d2e80 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/function_arg.yul @@ -0,0 +1,88 @@ +{ + { + mstore(0x40, memoryguard(128)) + sstore(0, f(0)) + } + function f(a1) -> v { + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + } +} +// ---- +// step: stackLimitEvader +// +// { +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(0, f(0)) +// } +// function f(a1) -> v +// { +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, a1) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), a1) +// } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/stub.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/stub.yul new file mode 100644 index 000000000..ce12229b6 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/stub.yul @@ -0,0 +1,98 @@ +{ + { + mstore(0x40, 
memoryguard(128)) + sstore(g(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16), f()) + } + function g(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16) -> v { + // Should be, but cannot yet be escalated. + v := b16 + } + function f() -> v{ + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + } +} +// ---- +// step: stackLimitEvader +// +// { +// { +// mstore(0x40, memoryguard(0xa0)) +// sstore(g(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), f()) +// } +// function g(b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16) -> v +// { v := b16 } +// function f() -> v_1 +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(mul(4, 4)) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, 4), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0x80)) +// } +// } diff --git a/test/libyul/yulOptimizerTests/stackLimitEvader/tree.yul b/test/libyul/yulOptimizerTests/stackLimitEvader/tree.yul new file mode 100644 index 000000000..357f6bb68 --- /dev/null +++ b/test/libyul/yulOptimizerTests/stackLimitEvader/tree.yul @@ -0,0 +1,339 @@ +{ + { + mstore(0x40, memoryguard(128)) + sstore(23, f()) + } + function f() -> v{ + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(g()) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := 
calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,h()), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + } + function g() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + v := i() + } + function h() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(10,4)) + a1 := calldataload(mul(0,4)) + a2 := calldataload(mul(1,4)) + let a12 := calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + let a18 := calldataload(mul(18,4)) + let a19 := calldataload(mul(19,4)) + sstore(0, add(a1, a2)) + sstore(mul(17,4), a19) + sstore(mul(17,4), a18) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + v := i() + } + function i() -> v { + let a1 := calldataload(mul(1,4)) + let a2 := calldataload(mul(2,4)) + let a3 := calldataload(mul(3,4)) + let a4 := calldataload(mul(4,4)) + let a5 := calldataload(mul(5,4)) + let a6 := calldataload(mul(6,4)) + let a7 := calldataload(mul(7,4)) + let a8 := calldataload(mul(8,4)) + let a9 := calldataload(mul(9,4)) + a1 := calldataload(mul(0,4)) + let a10 := calldataload(mul(10,4)) + let a11 := calldataload(mul(11,4)) + let a12 := 
calldataload(mul(12,4)) + let a13 := calldataload(mul(13,4)) + let a14 := calldataload(mul(14,4)) + let a15 := calldataload(mul(15,4)) + let a16 := calldataload(mul(16,4)) + let a17 := calldataload(mul(17,4)) + sstore(0, a1) + sstore(mul(17,4), a17) + sstore(mul(16,4), a16) + sstore(mul(15,4), a15) + sstore(mul(14,4), a14) + sstore(mul(13,4), a13) + sstore(mul(12,4), a12) + sstore(mul(11,4), a11) + sstore(mul(10,4), a10) + sstore(mul(9,4), a9) + sstore(mul(8,4), a8) + sstore(mul(7,4), a7) + sstore(mul(6,4), a6) + sstore(mul(5,4), a5) + sstore(mul(4,4), a4) + sstore(mul(3,4), a3) + sstore(mul(2,4), a2) + sstore(mul(1,4), a1) + v := sload(mul(42,8)) + } +} +// ---- +// step: stackLimitEvader +// +// { +// { +// mstore(0x40, memoryguard(0x0100)) +// sstore(23, f()) +// } +// function f() -> v +// { +// mstore(0xe0, calldataload(mul(1, 4))) +// let a2 := calldataload(mul(2, 4)) +// let a3 := calldataload(mul(3, 4)) +// let a4 := calldataload(g()) +// let a5 := calldataload(mul(5, 4)) +// let a6 := calldataload(mul(6, 4)) +// let a7 := calldataload(mul(7, 4)) +// let a8 := calldataload(mul(8, 4)) +// let a9 := calldataload(mul(9, 4)) +// mstore(0xe0, calldataload(mul(0, 4))) +// let a10 := calldataload(mul(10, 4)) +// let a11 := calldataload(mul(11, 4)) +// let a12 := calldataload(mul(12, 4)) +// let a13 := calldataload(mul(13, 4)) +// let a14 := calldataload(mul(14, 4)) +// let a15 := calldataload(mul(15, 4)) +// let a16 := calldataload(mul(16, 4)) +// let a17 := calldataload(mul(17, 4)) +// sstore(0, mload(0xe0)) +// sstore(mul(17, 4), a17) +// sstore(mul(16, 4), a16) +// sstore(mul(15, 4), a15) +// sstore(mul(14, 4), a14) +// sstore(mul(13, 4), a13) +// sstore(mul(12, 4), a12) +// sstore(mul(11, 4), a11) +// sstore(mul(10, 4), a10) +// sstore(mul(9, 4), a9) +// sstore(mul(8, h()), a8) +// sstore(mul(7, 4), a7) +// sstore(mul(6, 4), a6) +// sstore(mul(5, 4), a5) +// sstore(mul(4, 4), a4) +// sstore(mul(3, 4), a3) +// sstore(mul(2, 4), a2) +// sstore(mul(1, 4), mload(0xe0)) +// } +// function g() -> v_1 +// { +// mstore(0xa0, calldataload(mul(1, 4))) +// let a2_3 := calldataload(mul(2, 4)) +// let a3_4 := calldataload(mul(3, 4)) +// let a4_5 := calldataload(mul(4, 4)) +// let a5_6 := calldataload(mul(5, 4)) +// let a6_7 := calldataload(mul(6, 4)) +// let a7_8 := calldataload(mul(7, 4)) +// let a8_9 := calldataload(mul(8, 4)) +// let a9_10 := calldataload(mul(9, 4)) +// mstore(0xa0, calldataload(mul(0, 4))) +// let a10_11 := calldataload(mul(10, 4)) +// let a11_12 := calldataload(mul(11, 4)) +// let a12_13 := calldataload(mul(12, 4)) +// let a13_14 := calldataload(mul(13, 4)) +// let a14_15 := calldataload(mul(14, 4)) +// let a15_16 := calldataload(mul(15, 4)) +// let a16_17 := calldataload(mul(16, 4)) +// let a17_18 := calldataload(mul(17, 4)) +// sstore(0, mload(0xa0)) +// sstore(mul(17, 4), a17_18) +// sstore(mul(16, 4), a16_17) +// sstore(mul(15, 4), a15_16) +// sstore(mul(14, 4), a14_15) +// sstore(mul(13, 4), a13_14) +// sstore(mul(12, 4), a12_13) +// sstore(mul(11, 4), a11_12) +// sstore(mul(10, 4), a10_11) +// sstore(mul(9, 4), a9_10) +// sstore(mul(8, 4), a8_9) +// sstore(mul(7, 4), a7_8) +// sstore(mul(6, 4), a6_7) +// sstore(mul(5, 4), a5_6) +// sstore(mul(4, 4), a4_5) +// sstore(mul(3, 4), a3_4) +// sstore(mul(2, 4), a2_3) +// sstore(mul(1, 4), mload(0xa0)) +// v_1 := i() +// } +// function h() -> v_19 +// { +// mstore(0xc0, calldataload(mul(1, 4))) +// mstore(0xa0, calldataload(mul(2, 4))) +// let a3_22 := calldataload(mul(3, 4)) +// let a4_23 := calldataload(mul(4, 4)) +// let 
a5_24 := calldataload(mul(5, 4)) +// let a6_25 := calldataload(mul(6, 4)) +// let a7_26 := calldataload(mul(7, 4)) +// let a8_27 := calldataload(mul(8, 4)) +// let a9_28 := calldataload(mul(9, 4)) +// let a10_29 := calldataload(mul(10, 4)) +// let a11_30 := calldataload(mul(10, 4)) +// mstore(0xc0, calldataload(mul(0, 4))) +// mstore(0xa0, calldataload(mul(1, 4))) +// let a12_31 := calldataload(mul(12, 4)) +// let a13_32 := calldataload(mul(13, 4)) +// let a14_33 := calldataload(mul(14, 4)) +// let a15_34 := calldataload(mul(15, 4)) +// let a16_35 := calldataload(mul(16, 4)) +// let a17_36 := calldataload(mul(17, 4)) +// let a18 := calldataload(mul(18, 4)) +// let a19 := calldataload(mul(19, 4)) +// sstore(0, add(mload(0xc0), mload(0xa0))) +// sstore(mul(17, 4), a19) +// sstore(mul(17, 4), a18) +// sstore(mul(17, 4), a17_36) +// sstore(mul(16, 4), a16_35) +// sstore(mul(15, 4), a15_34) +// sstore(mul(14, 4), a14_33) +// sstore(mul(13, 4), a13_32) +// sstore(mul(12, 4), a12_31) +// sstore(mul(11, 4), a11_30) +// sstore(mul(10, 4), a10_29) +// sstore(mul(9, 4), a9_28) +// sstore(mul(8, 4), a8_27) +// sstore(mul(7, 4), a7_26) +// sstore(mul(6, 4), a6_25) +// sstore(mul(5, 4), a5_24) +// sstore(mul(4, 4), a4_23) +// sstore(mul(3, 4), a3_22) +// sstore(mul(2, 4), mload(0xa0)) +// sstore(mul(1, 4), mload(0xc0)) +// v_19 := i() +// } +// function i() -> v_37 +// { +// mstore(0x80, calldataload(mul(1, 4))) +// let a2_39 := calldataload(mul(2, 4)) +// let a3_40 := calldataload(mul(3, 4)) +// let a4_41 := calldataload(mul(4, 4)) +// let a5_42 := calldataload(mul(5, 4)) +// let a6_43 := calldataload(mul(6, 4)) +// let a7_44 := calldataload(mul(7, 4)) +// let a8_45 := calldataload(mul(8, 4)) +// let a9_46 := calldataload(mul(9, 4)) +// mstore(0x80, calldataload(mul(0, 4))) +// let a10_47 := calldataload(mul(10, 4)) +// let a11_48 := calldataload(mul(11, 4)) +// let a12_49 := calldataload(mul(12, 4)) +// let a13_50 := calldataload(mul(13, 4)) +// let a14_51 := calldataload(mul(14, 4)) +// let a15_52 := calldataload(mul(15, 4)) +// let a16_53 := calldataload(mul(16, 4)) +// let a17_54 := calldataload(mul(17, 4)) +// sstore(0, mload(0x80)) +// sstore(mul(17, 4), a17_54) +// sstore(mul(16, 4), a16_53) +// sstore(mul(15, 4), a15_52) +// sstore(mul(14, 4), a14_51) +// sstore(mul(13, 4), a13_50) +// sstore(mul(12, 4), a12_49) +// sstore(mul(11, 4), a11_48) +// sstore(mul(10, 4), a10_47) +// sstore(mul(9, 4), a9_46) +// sstore(mul(8, 4), a8_45) +// sstore(mul(7, 4), a7_44) +// sstore(mul(6, 4), a6_43) +// sstore(mul(5, 4), a5_42) +// sstore(mul(4, 4), a4_41) +// sstore(mul(3, 4), a3_40) +// sstore(mul(2, 4), a2_39) +// sstore(mul(1, 4), mload(0x80)) +// v_37 := sload(mul(42, 8)) +// } +// }