Stack layout generator for new code generation.

Daniel Kirchner 2021-07-05 18:13:44 +02:00
parent f4effe966e
commit f881409ea4
19 changed files with 2237 additions and 22 deletions

View File

@@ -52,6 +52,21 @@ template <class T, class U> std::vector<T>& operator+=(std::vector<T>& _a, U&& _
std::move(_b.begin(), _b.end(), std::back_inserter(_a));
return _a;
}
/// Concatenate the contents of a container onto a list
template <class T, class U> std::list<T>& operator+=(std::list<T>& _a, U& _b)
{
for (auto const& i: _b)
_a.push_back(T(i));
return _a;
}
/// Concatenate the contents of a container onto a list, move variant.
template <class T, class U> std::list<T>& operator+=(std::list<T>& _a, U&& _b)
{
std::move(_b.begin(), _b.end(), std::back_inserter(_a));
return _a;
}
/// Concatenate the contents of a container onto a multiset
template <class U, class... T> std::multiset<T...>& operator+=(std::multiset<T...>& _a, U& _b)
{
@@ -321,6 +336,44 @@ void joinMap(std::map<K, V>& _a, std::map<K, V>&& _b, F _conflictSolver)
}
}
namespace detail
{
template<typename Container, typename Value>
auto findOffset(Container&& _container, Value&& _value, int)
-> decltype(_container.find(_value) == _container.end(), std::distance(_container.begin(), _container.find(_value)), std::optional<size_t>())
{
auto it = _container.find(std::forward<Value>(_value));
auto end = _container.end();
if (it == end)
return std::nullopt;
return std::distance(_container.begin(), it);
}
template<typename Range, typename Value>
auto findOffset(Range&& _range, Value&& _value, void*)
-> decltype(std::find(std::begin(_range), std::end(_range), std::forward<Value>(_value)) == std::end(_range), std::optional<size_t>())
{
auto begin = std::begin(_range);
auto end = std::end(_range);
auto it = std::find(begin, end, std::forward<Value>(_value));
if (it == end)
return std::nullopt;
return std::distance(begin, it);
}
}
/// @returns an std::optional<size_t> containing the offset of the first element in @a _range that is equal to @a _value,
/// if any, or std::nullopt otherwise.
/// Uses a linear search (``std::find``) unless @a _range is a container and provides a
/// suitable ``.find`` function (e.g. it will use the logarithmic ``.find`` function in ``std::set`` instead).
template<typename Range>
auto findOffset(Range&& _range, std::remove_reference_t<decltype(*std::cbegin(_range))> const& _value)
-> decltype(detail::findOffset(std::forward<Range>(_range), _value, 0))
{
return detail::findOffset(std::forward<Range>(_range), _value, 0);
}
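A minimal usage sketch of the helper above (not part of the diff; it assumes these utilities are exported from <libsolutil/CommonData.h> in namespace solidity::util, which this hunk does not show):
// Sketch only: exercises both findOffset overloads.
// Assumption: the helpers above live in solidity::util in <libsolutil/CommonData.h>.
#include <libsolutil/CommonData.h>
#include <cassert>
#include <optional>
#include <set>
#include <vector>
void findOffsetExample()
{
	std::vector<int> v{10, 20, 30};
	assert(solidity::util::findOffset(v, 30) == std::optional<size_t>(2)); // linear std::find
	assert(!solidity::util::findOffset(v, 40));                            // not found -> nullopt
	std::set<int> s{1, 2, 3};
	assert(solidity::util::findOffset(s, 2) == std::optional<size_t>(1));  // logarithmic std::set::find
}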
// String conversion functions, mainly to/from hex/nibble/byte representations.
enum class WhenError

View File

@@ -68,6 +68,9 @@ add_library(yul
backends/evm/EVMMetrics.h
backends/evm/NoOutputAssembly.h
backends/evm/NoOutputAssembly.cpp
backends/evm/StackHelpers.h
backends/evm/StackLayoutGenerator.h
backends/evm/StackLayoutGenerator.cpp
backends/evm/VariableReferenceCounter.h
backends/evm/VariableReferenceCounter.cpp
backends/wasm/EVMToEwasmTranslator.cpp

View File

@@ -24,6 +24,7 @@
#include <libyul/AST.h>
#include <libyul/AsmAnalysisInfo.h>
#include <libyul/Dialect.h>
#include <libyul/Exceptions.h>
#include <libyul/Scope.h>
#include <functional>
@@ -54,8 +55,19 @@ struct FunctionCallReturnLabelSlot
/// the function.
struct FunctionReturnLabelSlot
{
bool operator==(FunctionReturnLabelSlot const&) const { return true; }
bool operator<(FunctionReturnLabelSlot const&) const { return false; }
std::reference_wrapper<Scope::Function const> function;
bool operator==(FunctionReturnLabelSlot const& _rhs) const
{
// There can never be return label slots of different functions on stack simultaneously.
yulAssert(&function.get() == &_rhs.function.get(), "");
return true;
}
bool operator<(FunctionReturnLabelSlot const& _rhs) const
{
// There can never be return label slots of different functions on stack simultaneously.
yulAssert(&function.get() == &_rhs.function.get(), "");
return false;
}
static constexpr bool canBeFreelyGenerated = false;
};
/// A slot containing the current value of a particular variable.

View File

@@ -0,0 +1,424 @@
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <libyul/backends/evm/ControlFlowGraph.h>
#include <libyul/Exceptions.h>
#include <libsolutil/Visitor.h>
#include <range/v3/algorithm/all_of.hpp>
#include <range/v3/algorithm/any_of.hpp>
#include <range/v3/view/enumerate.hpp>
#include <range/v3/view/iota.hpp>
#include <range/v3/view/reverse.hpp>
#include <range/v3/view/take.hpp>
namespace solidity::yul
{
inline std::string stackSlotToString(StackSlot const& _slot)
{
return std::visit(util::GenericVisitor{
[](FunctionCallReturnLabelSlot const& _ret) -> std::string { return "RET[" + _ret.call.get().functionName.name.str() + "]"; },
[](FunctionReturnLabelSlot const&) -> std::string { return "RET"; },
[](VariableSlot const& _var) { return _var.variable.get().name.str(); },
[](LiteralSlot const& _lit) { return util::toCompactHexWithPrefix(_lit.value); },
[](TemporarySlot const& _tmp) -> std::string { return "TMP[" + _tmp.call.get().functionName.name.str() + ", " + std::to_string(_tmp.index) + "]"; },
[](JunkSlot const&) -> std::string { return "JUNK"; }
}, _slot);
}
inline std::string stackToString(Stack const& _stack)
{
std::string result("[ ");
for (auto const& slot: _stack)
result += stackSlotToString(slot) + ' ';
result += ']';
return result;
}
// Abstraction of stack shuffling operations. Can be defined as actual concept once we switch to C++20.
// Used as an interface for the stack shuffler below.
// The shuffle operation class is expected to internally keep track of a current stack layout (the "source layout")
// that the shuffler is supposed to shuffle to a fixed target stack layout.
// The shuffler works iteratively. At each iteration it instantiates an instance of the shuffle operations and
// queries it for various information about the current source stack layout and the target layout, as described
// in the interface below.
// Based on that information the shuffler decides which is the next optimal operation to perform on the stack
// and calls the corresponding entry point in the shuffling operations (swap, pushOrDupTarget or pop).
/*
template<typename ShuffleOperations>
concept ShuffleOperationConcept = requires(ShuffleOperations ops, size_t sourceOffset, size_t targetOffset, size_t depth) {
// Returns true, iff the current slot at sourceOffset in source layout is a suitable slot at targetOffset.
{ ops.isCompatible(sourceOffset, targetOffset) } -> std::convertible_to<bool>;
// Returns true, iff the slots at the two given source offsets are identical.
{ ops.sourceIsSame(sourceOffset, sourceOffset) } -> std::convertible_to<bool>;
// Returns a positive integer n, if the slot at the given source offset needs n more copies.
// Returns a negative integer -n, if the slot at the given source offset occurs n times too many.
// Returns zero if the number of occurrences, in the current source layout, of the slot at the given source offset
// matches the desired number of occurrences in the target.
{ ops.sourceMultiplicity(sourceOffset) } -> std::convertible_to<int>;
// Returns a positive integer n, if the slot at the given target offset needs n more copies.
// Returns a negative integer -n, if the slot at the given target offset occurs n times too many.
// Returns zero if the number of occurrences, in the current source layout, of the slot at the given target offset
// matches the desired number of occurrences in the target.
{ ops.targetMultiplicity(targetOffset) } -> std::convertible_to<int>;
// Returns true, iff any slot is compatible with the given target offset.
{ ops.targetIsArbitrary(targetOffset) } -> std::convertible_to<bool>;
// Returns the number of slots in the source layout.
{ ops.sourceSize() } -> std::convertible_to<size_t>;
// Returns the number of slots in the target layout.
{ ops.targetSize() } -> std::convertible_to<size_t>;
// Swaps the top most slot in the source with the slot `depth` slots below the top.
// In terms of EVM opcodes this is supposed to be a `SWAP<depth>`.
// In terms of vectors this is supposed to be `std::swap(source.at(source.size() - depth - 1), source.back())`.
{ ops.swap(depth) };
// Pops the top most slot in the source, i.e. the slot at offset ops.sourceSize() - 1.
// In terms of EVM opcodes this is `POP`.
// In terms of vectors this is `source.pop();`.
{ ops.pop() };
// Dups or pushes the slot that is supposed to end up at the given target offset.
{ ops.pushOrDupTarget(targetOffset) };
};
*/
/// Helper class that can perform shuffling of a source stack layout to a target stack layout via
/// abstracted shuffle operations.
template</*ShuffleOperationConcept*/ typename ShuffleOperations>
class Shuffler
{
public:
/// Executes the stack shuffling operations. Instantiates an instance of ShuffleOperations
/// in each iteration. Each iteration performs exactly one operation that modifies the stack.
/// After `shuffle`, source and target have the same size and all slots in the source layout are
/// compatible with the slots at the same target offset.
template<typename... Args>
static void shuffle(Args&&... args)
{
bool needsMoreShuffling = true;
// The shuffling algorithm should always terminate in polynomial time, but we provide a limit
// in case it does not terminate due to a bug.
size_t iterationCount = 0;
while (iterationCount < 1000 && (needsMoreShuffling = shuffleStep(std::forward<Args>(args)...)))
++iterationCount;
yulAssert(!needsMoreShuffling, "Could not create stack layout after 1000 iterations.");
}
private:
// If dupping an ideal slot causes a slot that will still be required to become unreachable, then dup
// the latter slot first.
// @returns true, if it performed a dup.
static bool dupDeepSlotIfRequired(ShuffleOperations& _ops)
{
// Check if the stack is large enough for anything to potentially become unreachable.
if (_ops.sourceSize() < 15)
return false;
// Check whether any deep slot might still be needed later (i.e. we still need to reach it with a DUP or SWAP).
for (size_t sourceOffset: ranges::views::iota(0u, _ops.sourceSize() - 15))
{
// This slot needs to be moved.
if (!_ops.isCompatible(sourceOffset, sourceOffset))
{
// If the current top fixes the slot, swap it down now.
if (_ops.isCompatible(_ops.sourceSize() - 1, sourceOffset))
{
_ops.swap(_ops.sourceSize() - sourceOffset - 1);
return true;
}
// Bring up a slot to fix this now, if possible.
if (bringUpTargetSlot(_ops, sourceOffset))
return true;
// Otherwise swap up the slot that will fix the offending slot.
for (auto offset: ranges::views::iota(sourceOffset + 1, _ops.sourceSize()))
if (_ops.isCompatible(offset, sourceOffset))
{
_ops.swap(_ops.sourceSize() - offset - 1);
return true;
}
// Otherwise give up - we will need stack compression or stack limit evasion.
}
// We need another copy of this slot.
else if (_ops.sourceMultiplicity(sourceOffset) > 0)
{
// If this slot occurs again later, we skip this occurrence.
if (ranges::any_of(
ranges::views::iota(sourceOffset + 1, _ops.sourceSize()),
[&](size_t _offset) { return _ops.sourceIsSame(sourceOffset, _offset); }
))
continue;
// Bring up the target slot that would otherwise become unreachable.
for (size_t targetOffset: ranges::views::iota(0u, _ops.targetSize()))
if (!_ops.targetIsArbitrary(targetOffset) && _ops.isCompatible(sourceOffset, targetOffset))
{
_ops.pushOrDupTarget(targetOffset);
return true;
}
}
}
return false;
}
/// Finds a slot to dup or push with the aim of eventually fixing @a _targetOffset in the target.
/// In the simplest case, the slot at @a _targetOffset has a multiplicity > 0, i.e. it can directly be dupped or pushed
/// and the next iteration will fix @a _targetOffset.
/// But, in general, there may already be enough copies of the slot that is supposed to end up at @a _targetOffset
/// on stack, s.t. it cannot be dupped again. In that case there has to be a copy of the desired slot on stack already
/// elsewhere that is not yet in place (`nextOffset` below). The fact that ``nextOffset`` is not in place means that
/// we can (recursively) try bringing up the slot that is supposed to end up at ``nextOffset`` in the *target*.
/// When the target slot at ``nextOffset`` is fixed, the current source slot at ``nextOffset`` will be
/// at the stack top, which is the slot required at @a _targetOffset.
static bool bringUpTargetSlot(ShuffleOperations& _ops, size_t _targetOffset)
{
std::list<size_t> toVisit{_targetOffset};
std::set<size_t> visited;
while (!toVisit.empty())
{
auto offset = *toVisit.begin();
toVisit.erase(toVisit.begin());
visited.emplace(offset);
if (_ops.targetMultiplicity(offset) > 0)
{
_ops.pushOrDupTarget(offset);
return true;
}
// There must be another slot we can dup/push that will lead to the target slot at ``offset`` being fixed.
for (auto nextOffset: ranges::views::iota(0u, std::min(_ops.sourceSize(), _ops.targetSize())))
if (
!_ops.isCompatible(nextOffset, nextOffset) &&
_ops.isCompatible(nextOffset, offset)
)
if (!visited.count(nextOffset))
toVisit.emplace_back(nextOffset);
}
return false;
}
/// Performs a single stack operation, transforming the source layout closer to the target layout.
template<typename... Args>
static bool shuffleStep(Args&&... args)
{
ShuffleOperations ops{std::forward<Args>(args)...};
// All source slots are final.
if (ranges::all_of(
ranges::views::iota(0u, ops.sourceSize()),
[&](size_t _index) { return ops.isCompatible(_index, _index); }
))
{
// Bring up all remaining target slots, if any, or terminate otherwise.
if (ops.sourceSize() < ops.targetSize())
{
if (!dupDeepSlotIfRequired(ops))
yulAssert(bringUpTargetSlot(ops, ops.sourceSize()), "");
return true;
}
return false;
}
size_t sourceTop = ops.sourceSize() - 1;
// If we no longer need the current stack top, we pop it, unless we need an arbitrary slot at this position
// in the target.
if (
ops.sourceMultiplicity(sourceTop) < 0 &&
!ops.targetIsArbitrary(sourceTop)
)
{
ops.pop();
return true;
}
yulAssert(ops.targetSize() > 0, "");
// If the top is not supposed to be exactly what is on top right now, try to find a lower position to swap it to.
if (!ops.isCompatible(sourceTop, sourceTop) || ops.targetIsArbitrary(sourceTop))
for (size_t offset: ranges::views::iota(0u, std::min(ops.sourceSize(), ops.targetSize())))
// It makes sense to swap to a lower position, if
if (
!ops.isCompatible(offset, offset) && // The lower slot is not already in position.
!ops.sourceIsSame(offset, sourceTop) && // We would not just swap identical slots.
ops.isCompatible(sourceTop, offset) // The lower position wants to have this slot.
)
{
// We cannot swap that deep.
if (ops.sourceSize() - offset - 1 > 16)
{
// If there is a reachable slot to be removed, park the current top there.
for (size_t swapDepth: ranges::views::iota(1u, 17u) | ranges::views::reverse)
if (ops.sourceMultiplicity(ops.sourceSize() - 1 - swapDepth) < 0)
{
ops.swap(swapDepth);
return true;
}
// Otherwise we rely on stack compression or stack-to-memory.
}
ops.swap(ops.sourceSize() - offset - 1);
return true;
}
// ops.sourceSize() > ops.targetSize() cannot be true anymore, since if the source top is no longer required,
// we already popped it, and if it is required, we already swapped it down to a suitable target position.
yulAssert(ops.sourceSize() <= ops.targetSize(), "");
// If a lower slot should be removed, try to bring up the slot that should end up there instead.
// Note that after the cases above, there will always be a target slot to duplicate in this case.
for (size_t offset: ranges::views::iota(0u, ops.sourceSize()))
if (
!ops.isCompatible(offset, offset) && // The lower slot is not already in position.
ops.sourceMultiplicity(offset) < 0 && // We have too many copies of this slot.
offset <= ops.targetSize() && // There is a target slot at this position.
!ops.targetIsArbitrary(offset) // And that target slot is not arbitrary.
)
{
if (!dupDeepSlotIfRequired(ops))
yulAssert(bringUpTargetSlot(ops, offset), "");
return true;
}
// At this point we want to keep all slots.
for (size_t i = 0; i < ops.sourceSize(); ++i)
yulAssert(ops.sourceMultiplicity(i) >= 0, "");
yulAssert(ops.sourceSize() <= ops.targetSize(), "");
// If the top is not in position, try to find a slot that wants to be at the top and swap it up.
if (!ops.isCompatible(sourceTop, sourceTop))
for (size_t sourceOffset: ranges::views::iota(0u, ops.sourceSize()))
if (
!ops.isCompatible(sourceOffset, sourceOffset) &&
ops.isCompatible(sourceOffset, sourceTop)
)
{
ops.swap(ops.sourceSize() - sourceOffset - 1);
return true;
}
// If we still need more slots, produce a suitable one.
if (ops.sourceSize() < ops.targetSize())
{
if (!dupDeepSlotIfRequired(ops))
yulAssert(bringUpTargetSlot(ops, ops.sourceSize()), "");
return true;
}
// The stack has the correct size, each slot has the correct number of copies and the top is in position.
yulAssert(ops.sourceSize() == ops.targetSize(), "");
size_t size = ops.sourceSize();
for (size_t i = 0; i < ops.sourceSize(); ++i)
yulAssert(ops.sourceMultiplicity(i) == 0 && (ops.targetIsArbitrary(i) || ops.targetMultiplicity(i) == 0), "");
yulAssert(ops.isCompatible(sourceTop, sourceTop), "");
// If we find a lower slot that is out of position, but also compatible with the top, swap that up.
for (size_t offset: ranges::views::iota(0u, size))
if (!ops.isCompatible(offset, offset) && ops.isCompatible(sourceTop, offset))
{
ops.swap(size - offset - 1);
return true;
}
// Swap up any slot that is still out of position.
for (size_t offset: ranges::views::iota(0u, size))
if (!ops.isCompatible(offset, offset) && !ops.sourceIsSame(offset, sourceTop))
{
ops.swap(size - offset - 1);
return true;
}
yulAssert(false, "");
}
};
/// Transforms @a _currentStack to @a _targetStack, invoking the provided shuffling operations.
/// Modifies @a _currentStack itself after each invocation of the shuffling operations.
template<typename Swap, typename PushOrDup, typename Pop>
void createStackLayout(Stack& _currentStack, Stack const& _targetStack, Swap _swap, PushOrDup _pushOrDup, Pop _pop)
{
struct ShuffleOperations
{
Stack& currentStack;
Stack const& targetStack;
Swap swapCallback;
PushOrDup pushOrDupCallback;
Pop popCallback;
std::map<StackSlot, int> multiplicity;
ShuffleOperations(
Stack& _currentStack,
Stack const& _targetStack,
Swap _swap,
PushOrDup _pushOrDup,
Pop _pop
):
currentStack(_currentStack),
targetStack(_targetStack),
swapCallback(_swap),
pushOrDupCallback(_pushOrDup),
popCallback(_pop)
{
for (auto const& slot: currentStack)
--multiplicity[slot];
for (auto&& [offset, slot]: targetStack | ranges::views::enumerate)
if (std::holds_alternative<JunkSlot>(slot) && offset < currentStack.size())
++multiplicity[currentStack.at(offset)];
else
++multiplicity[slot];
}
bool isCompatible(size_t _source, size_t _target)
{
return
_source < currentStack.size() &&
_target < targetStack.size() &&
(
std::holds_alternative<JunkSlot>(targetStack.at(_target)) ||
currentStack.at(_source) == targetStack.at(_target)
);
}
bool sourceIsSame(size_t _lhs, size_t _rhs) { return currentStack.at(_lhs) == currentStack.at(_rhs); }
int sourceMultiplicity(size_t _offset) { return multiplicity.at(currentStack.at(_offset)); }
int targetMultiplicity(size_t _offset) { return multiplicity.at(targetStack.at(_offset)); }
bool targetIsArbitrary(size_t offset)
{
return offset < targetStack.size() && std::holds_alternative<JunkSlot>(targetStack.at(offset));
}
void swap(size_t _i)
{
swapCallback(static_cast<unsigned>(_i));
std::swap(currentStack.at(currentStack.size() - _i - 1), currentStack.back());
}
size_t sourceSize() { return currentStack.size(); }
size_t targetSize() { return targetStack.size(); }
void pop()
{
popCallback();
currentStack.pop_back();
}
void pushOrDupTarget(size_t _offset)
{
auto const& targetSlot = targetStack.at(_offset);
pushOrDupCallback(targetSlot);
currentStack.push_back(targetSlot);
}
};
Shuffler<ShuffleOperations>::shuffle(_currentStack, _targetStack, _swap, _pushOrDup, _pop);
yulAssert(_currentStack.size() == _targetStack.size(), "");
for (auto&& [current, target]: ranges::zip_view(_currentStack, _targetStack))
if (std::holds_alternative<JunkSlot>(target))
current = JunkSlot{};
else
yulAssert(current == target, "");
}
}
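A rough usage sketch of createStackLayout (not part of the diff): the callbacks only observe the operations the shuffler chooses, while the helper mutates the current stack itself.
// Sketch only: logs the shuffling operations chosen while transforming _current into _target.
// Uses only types and helpers declared in this header (StackHelpers.h).
#include <libyul/backends/evm/StackHelpers.h>
#include <iostream>
void logShuffle(solidity::yul::Stack& _current, solidity::yul::Stack const& _target)
{
	using namespace solidity::yul;
	createStackLayout(
		_current,
		_target,
		[](unsigned _depth) { std::cout << "SWAP" << _depth << "\n"; },
		[](StackSlot const& _slot) { std::cout << "PUSH/DUP " << stackSlotToString(_slot) << "\n"; },
		[]() { std::cout << "POP\n"; }
	);
	// On return, _current equals _target (positions that are junk in the target are set to JunkSlot).
	std::cout << stackToString(_current) << "\n";
}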

View File

@@ -0,0 +1,578 @@
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* Stack layout generator for Yul to EVM code generation.
*/
#include <libyul/backends/evm/StackLayoutGenerator.h>
#include <libyul/backends/evm/StackHelpers.h>
#include <libsolutil/Algorithms.h>
#include <libsolutil/cxx20.h>
#include <libsolutil/Visitor.h>
#include <range/v3/algorithm/any_of.hpp>
#include <range/v3/range/conversion.hpp>
#include <range/v3/view/all.hpp>
#include <range/v3/view/concat.hpp>
#include <range/v3/view/drop.hpp>
#include <range/v3/view/drop_last.hpp>
#include <range/v3/view/filter.hpp>
#include <range/v3/view/iota.hpp>
#include <range/v3/view/map.hpp>
#include <range/v3/view/reverse.hpp>
#include <range/v3/view/take.hpp>
#include <range/v3/view/transform.hpp>
using namespace solidity;
using namespace solidity::yul;
using namespace std;
StackLayout StackLayoutGenerator::run(CFG const& _cfg)
{
StackLayout stackLayout;
StackLayoutGenerator{stackLayout}.processEntryPoint(*_cfg.entry);
for (auto& functionInfo: _cfg.functionInfo | ranges::views::values)
StackLayoutGenerator{stackLayout}.processEntryPoint(*functionInfo.entry);
return stackLayout;
}
StackLayoutGenerator::StackLayoutGenerator(StackLayout& _layout): m_layout(_layout)
{
}
namespace
{
/// @returns the ideal stack to have before executing an operation that outputs @a _operationOutput, s.t.
/// shuffling to @a _post is cheap (the returned layout excludes the inputs of the operation itself).
/// If @a _generateSlotOnTheFly returns true for a slot, this slot should not occur in the ideal stack, but
/// rather be generated on the fly during shuffling.
template<typename Callable>
Stack createIdealLayout(Stack const& _operationOutput, Stack const& _post, Callable _generateSlotOnTheFly)
{
struct PreviousSlot { size_t slot; };
// Determine the number of slots that have to be on stack before executing the operation (excluding
// the inputs of the operation itself).
// That is, slots that should not be generated on the fly and are not outputs of the operation.
size_t preOperationLayoutSize = _post.size();
for (auto const& slot: _post)
if (util::contains(_operationOutput, slot) || _generateSlotOnTheFly(slot))
--preOperationLayoutSize;
// The symbolic layout directly after the operation has the form
// PreviousSlot{0}, ..., PreviousSlot{n}, [output<0>], ..., [output<m>]
auto layout = ranges::views::iota(0u, preOperationLayoutSize) |
ranges::views::transform([](size_t _index) { return PreviousSlot{_index}; }) |
ranges::to<vector<variant<PreviousSlot, StackSlot>>>;
layout += _operationOutput;
// Shortcut for trivial case.
if (layout.empty())
return Stack{};
// Next we will shuffle the layout to the post stack using ShuffleOperations
// that are aware of ``PreviousSlot``s.
struct ShuffleOperations
{
vector<variant<PreviousSlot, StackSlot>>& layout;
Stack const& post;
std::set<StackSlot> outputs;
std::map<StackSlot, int> multiplicity;
Callable generateSlotOnTheFly;
ShuffleOperations(
vector<variant<PreviousSlot, StackSlot>>& _layout,
Stack const& _post,
Callable _generateSlotOnTheFly
): layout(_layout), post(_post), generateSlotOnTheFly(_generateSlotOnTheFly)
{
for (auto const& layoutSlot: layout)
if (StackSlot const* slot = get_if<StackSlot>(&layoutSlot))
outputs.insert(*slot);
for (auto const& layoutSlot: layout)
if (StackSlot const* slot = get_if<StackSlot>(&layoutSlot))
--multiplicity[*slot];
for (auto&& slot: post)
if (outputs.count(slot) || generateSlotOnTheFly(slot))
++multiplicity[slot];
}
bool isCompatible(size_t _source, size_t _target)
{
return
_source < layout.size() &&
_target < post.size() &&
(
std::holds_alternative<JunkSlot>(post.at(_target)) ||
std::visit(util::GenericVisitor{
[&](PreviousSlot const&) {
return !outputs.count(post.at(_target)) && !generateSlotOnTheFly(post.at(_target));
},
[&](StackSlot const& _s) { return _s == post.at(_target); }
}, layout.at(_source))
);
}
bool sourceIsSame(size_t _lhs, size_t _rhs)
{
return std::visit(util::GenericVisitor{
[&](PreviousSlot const&, PreviousSlot const&) { return true; },
[&](StackSlot const& _lhs, StackSlot const& _rhs) { return _lhs == _rhs; },
[&](auto const&, auto const&) { return false; }
}, layout.at(_lhs), layout.at(_rhs));
}
int sourceMultiplicity(size_t _offset)
{
return std::visit(util::GenericVisitor{
[&](PreviousSlot const&) { return 0; },
[&](StackSlot const& _s) { return multiplicity.at(_s); }
}, layout.at(_offset));
}
int targetMultiplicity(size_t _offset)
{
if (!outputs.count(post.at(_offset)) && !generateSlotOnTheFly(post.at(_offset)))
return 0;
return multiplicity.at(post.at(_offset));
}
bool targetIsArbitrary(size_t _offset)
{
return _offset < post.size() && std::holds_alternative<JunkSlot>(post.at(_offset));
}
void swap(size_t _i)
{
yulAssert(!holds_alternative<PreviousSlot>(layout.at(layout.size() - _i - 1)) || !holds_alternative<PreviousSlot>(layout.back()), "");
std::swap(layout.at(layout.size() - _i - 1), layout.back());
}
size_t sourceSize() { return layout.size(); }
size_t targetSize() { return post.size(); }
void pop() { layout.pop_back(); }
void pushOrDupTarget(size_t _offset) { layout.push_back(post.at(_offset)); }
};
Shuffler<ShuffleOperations>::shuffle(layout, _post, _generateSlotOnTheFly);
// Now we can construct the ideal layout before the operation.
// "layout" has shuffled the PreviousSlot{x} to new places using minimal operations to move the operation
// output in place. The resulting permutation of the PreviousSlot yields the ideal positions of slots
// before the operation, i.e. if PreviousSlot{2} is at a position at which _post contains VariableSlot{"tmp"},
// then we want the variable tmp in the slot at offset 2 in the layout before the operation.
vector<optional<StackSlot>> idealLayout(_post.size(), nullopt);
for (auto const& [slot, idealPosition]: ranges::zip_view(_post, layout))
if (PreviousSlot* previousSlot = std::get_if<PreviousSlot>(&idealPosition))
idealLayout.at(previousSlot->slot) = slot;
// The tail of layout must have contained the operation outputs and will not have been assigned slots in the last loop.
while (!idealLayout.empty() && !idealLayout.back())
idealLayout.pop_back();
yulAssert(idealLayout.size() == preOperationLayoutSize, "");
return idealLayout | ranges::views::transform([](optional<StackSlot> s) {
yulAssert(s, "");
return *s;
}) | ranges::to<Stack>;
}
}
Stack StackLayoutGenerator::propagateStackThroughOperation(Stack _exitStack, CFG::Operation const& _operation)
{
// This is a huge tradeoff between code size, gas cost and stack size.
auto generateSlotOnTheFly = [&](StackSlot const&) {
//return stack.size() > 12 && canBeFreelyGenerated(_slot);
// return canBeFreelyGenerated(_slot);
return false;
};
// Determine the ideal permutation of the slots in _exitStack that are not operation outputs (and not to be
// generated on the fly), s.t. shuffling `stack + _operation.output` to _exitStack is cheap.
Stack stack = createIdealLayout(_operation.output, _exitStack, generateSlotOnTheFly);
// Make sure the resulting previous slots do not overlap with any assigned variables.
if (auto const* assignment = get_if<CFG::Assignment>(&_operation.operation))
for (auto& stackSlot: stack)
if (auto const* varSlot = get_if<VariableSlot>(&stackSlot))
yulAssert(!util::contains(assignment->variables, *varSlot), "");
// Since stack + _operation.output can be easily shuffled to _exitStack, the desired layout before the operation
// is stack + _operation.input.
stack += _operation.input;
// Store the exact desired operation entry layout. The stored layout will be recreated by the code transform
// before executing the operation. However, this recreation can produce slots that can be freely generated or
// are duplicated, i.e. we can compress the stack afterwards without causing problems for code generation later.
m_layout.operationEntryLayout[&_operation] = stack;
// Remove anything from the stack top that can be freely generated or dupped from deeper on the stack.
while (!stack.empty())
{
if (canBeFreelyGenerated(stack.back()))
stack.pop_back();
else if (auto offset = util::findOffset(stack | ranges::views::reverse | ranges::views::drop(1), stack.back()))
{
if (*offset + 2 < 16)
stack.pop_back();
else
break;
}
else
break;
}
// TODO: there may be a better criterion than overall stack size.
if (stack.size() > 12)
// Deduplicate and remove slots that can be freely generated.
stack = compressStack(move(stack));
return stack;
}
Stack StackLayoutGenerator::propagateStackThroughBlock(Stack _exitStack, CFG::BasicBlock const& _block)
{
Stack stack = std::move(_exitStack);
for (auto& operation: _block.operations | ranges::views::reverse)
stack = propagateStackThroughOperation(stack, operation);
return stack;
}
void StackLayoutGenerator::processEntryPoint(CFG::BasicBlock const& _entry)
{
list<CFG::BasicBlock const*> toVisit{&_entry};
set<CFG::BasicBlock const*> visited;
// TODO: check whether visiting only a subset of these in the outer iteration below is enough.
list<pair<CFG::BasicBlock const*, CFG::BasicBlock const*>> backwardsJumps = collectBackwardsJumps(_entry);
while (!toVisit.empty())
{
// First calculate stack layouts without walking backwards jumps, i.e. assuming the current preliminary
// entry layout of the backwards jump target as the initial exit layout of the backwards-jumping block.
while (!toVisit.empty())
{
CFG::BasicBlock const *block = *toVisit.begin();
toVisit.pop_front();
if (visited.count(block))
continue;
if (std::optional<Stack> exitLayout = getExitLayoutOrStageDependencies(*block, visited, toVisit))
{
visited.emplace(block);
auto& info = m_layout.blockInfos[block];
info.exitLayout = *exitLayout;
info.entryLayout = propagateStackThroughBlock(info.exitLayout, *block);
for (auto entry: block->entries)
toVisit.emplace_back(entry);
}
else
continue;
}
// Determine which backwards jumps still require fixing and stage revisits of appropriate nodes.
for (auto [jumpingBlock, target]: backwardsJumps)
// This block jumps backwards, but does not provide all slots required by the jump target on exit.
// Therefore we need to visit the subgraph between ``target`` and ``jumpingBlock`` again.
if (ranges::any_of(
m_layout.blockInfos[target].entryLayout,
[exitLayout = m_layout.blockInfos[jumpingBlock].exitLayout](StackSlot const& _slot) {
return !util::contains(exitLayout, _slot);
}
))
{
// In particular, we can visit backwards starting from ``jumpingBlock`` and mark all entries to be visited
// again until we hit ``target``.
toVisit.emplace_front(jumpingBlock);
// Since we are likely to permute the entry layout of ``target``, we also visit its entries again.
// This is not required for correctness, since the set of stack slots will match, but it may move some
// required stack shuffling from the loop condition to outside the loop.
for (CFG::BasicBlock const* entry: target->entries)
visited.erase(entry);
util::BreadthFirstSearch<CFG::BasicBlock const*>{{jumpingBlock}}.run(
[&visited, target = target](CFG::BasicBlock const* _block, auto _addChild) {
visited.erase(_block);
if (_block == target)
return;
for (auto const* entry: _block->entries)
_addChild(entry);
}
);
// While the shuffled layout for ``target`` will be compatible, it can be worthwhile propagating
// it further up once more.
// This would mean not stopping at _block == target above, or even calling visited.clear() here and revisiting the entire graph.
// This is a tradeoff between the runtime of this process and the optimality of the result.
// Also note that while visiting the entire graph again *can* be helpful, it can also be detrimental.
}
}
stitchConditionalJumps(_entry);
}
optional<Stack> StackLayoutGenerator::getExitLayoutOrStageDependencies(
CFG::BasicBlock const& _block,
set<CFG::BasicBlock const*> const& _visited,
list<CFG::BasicBlock const*>& _toVisit
) const
{
return std::visit(util::GenericVisitor{
[&](CFG::BasicBlock::MainExit const&) -> std::optional<Stack>
{
// On the exit of the outermost block the stack can be empty.
return Stack{};
},
[&](CFG::BasicBlock::Jump const& _jump) -> std::optional<Stack>
{
if (_jump.backwards)
{
// Choose the best currently known entry layout of the jump target as initial exit.
// Note that this may not yet be the final layout.
if (auto* info = util::valueOrNullptr(m_layout.blockInfos, _jump.target))
return info->entryLayout;
return Stack{};
}
// If the current iteration has already visited the jump target, start from its entry layout.
if (_visited.count(_jump.target))
return m_layout.blockInfos.at(_jump.target).entryLayout;
// Otherwise stage the jump target for visit and defer the current block.
_toVisit.emplace_front(_jump.target);
return nullopt;
},
[&](CFG::BasicBlock::ConditionalJump const& _conditionalJump) -> std::optional<Stack>
{
bool zeroVisited = _visited.count(_conditionalJump.zero);
bool nonZeroVisited = _visited.count(_conditionalJump.nonZero);
if (zeroVisited && nonZeroVisited)
{
// If the current iteration has already visited both jump targets, combine their entry layouts.
Stack stack = combineStack(
m_layout.blockInfos.at(_conditionalJump.zero).entryLayout,
m_layout.blockInfos.at(_conditionalJump.nonZero).entryLayout
);
// Additionally, the jump condition has to be at the stack top at exit.
stack.emplace_back(_conditionalJump.condition);
return stack;
}
// If one of the jump targets has not been visited, stage it for visit and defer the current block.
if (!zeroVisited)
_toVisit.emplace_front(_conditionalJump.zero);
if (!nonZeroVisited)
_toVisit.emplace_front(_conditionalJump.nonZero);
return nullopt;
},
[&](CFG::BasicBlock::FunctionReturn const& _functionReturn) -> std::optional<Stack>
{
// A function return needs the return variables and the function return label slot on stack.
yulAssert(_functionReturn.info, "");
Stack stack = _functionReturn.info->returnVariables | ranges::views::transform([](auto const& _varSlot){
return StackSlot{_varSlot};
}) | ranges::to<Stack>;
stack.emplace_back(FunctionReturnLabelSlot{_functionReturn.info->function});
return stack;
},
[&](CFG::BasicBlock::Terminated const&) -> std::optional<Stack>
{
// A terminating block can have an empty stack on exit.
return Stack{};
},
}, _block.exit);
}
list<pair<CFG::BasicBlock const*, CFG::BasicBlock const*>> StackLayoutGenerator::collectBackwardsJumps(CFG::BasicBlock const& _entry) const
{
list<pair<CFG::BasicBlock const*, CFG::BasicBlock const*>> backwardsJumps;
util::BreadthFirstSearch<CFG::BasicBlock const*>{{&_entry}}.run([&](CFG::BasicBlock const* _block, auto _addChild) {
std::visit(util::GenericVisitor{
[&](CFG::BasicBlock::MainExit const&) {},
[&](CFG::BasicBlock::Jump const& _jump)
{
if (_jump.backwards)
backwardsJumps.emplace_back(_block, _jump.target);
_addChild(_jump.target);
},
[&](CFG::BasicBlock::ConditionalJump const& _conditionalJump)
{
_addChild(_conditionalJump.zero);
_addChild(_conditionalJump.nonZero);
},
[&](CFG::BasicBlock::FunctionReturn const&) {},
[&](CFG::BasicBlock::Terminated const&) {},
}, _block->exit);
});
return backwardsJumps;
}
void StackLayoutGenerator::stitchConditionalJumps(CFG::BasicBlock const& _block)
{
util::BreadthFirstSearch<CFG::BasicBlock const*> breadthFirstSearch{{&_block}};
breadthFirstSearch.run([&](CFG::BasicBlock const* _block, auto _addChild) {
auto& info = m_layout.blockInfos.at(_block);
std::visit(util::GenericVisitor{
[&](CFG::BasicBlock::MainExit const&) {},
[&](CFG::BasicBlock::Jump const& _jump)
{
if (!_jump.backwards)
_addChild(_jump.target);
},
[&](CFG::BasicBlock::ConditionalJump const& _conditionalJump)
{
auto& zeroTargetInfo = m_layout.blockInfos.at(_conditionalJump.zero);
auto& nonZeroTargetInfo = m_layout.blockInfos.at(_conditionalJump.nonZero);
Stack exitLayout = info.exitLayout;
// The last block must have produced the condition at the stack top.
yulAssert(!exitLayout.empty(), "");
yulAssert(exitLayout.back() == _conditionalJump.condition, "");
// The condition is consumed by the jump.
exitLayout.pop_back();
auto fixJumpTargetEntry = [&](Stack const& _originalEntryLayout) -> Stack {
Stack newEntryLayout = exitLayout;
// Whatever the block being jumped to does not actually require can be marked as junk.
for (auto& slot: newEntryLayout)
if (!util::contains(_originalEntryLayout, slot))
slot = JunkSlot{};
// Make sure everything the block being jumped to requires is actually present or can be generated.
for (auto const& slot: _originalEntryLayout)
yulAssert(canBeFreelyGenerated(slot) || util::contains(newEntryLayout, slot), "");
return newEntryLayout;
};
zeroTargetInfo.entryLayout = fixJumpTargetEntry(zeroTargetInfo.entryLayout);
nonZeroTargetInfo.entryLayout = fixJumpTargetEntry(nonZeroTargetInfo.entryLayout);
_addChild(_conditionalJump.zero);
_addChild(_conditionalJump.nonZero);
},
[&](CFG::BasicBlock::FunctionReturn const&) {},
[&](CFG::BasicBlock::Terminated const&) { },
}, _block->exit);
});
}
Stack StackLayoutGenerator::combineStack(Stack const& _stack1, Stack const& _stack2)
{
// TODO: it would be nicer to replace this with a constructive algorithm.
// Currently it uses a reduced version of Heap's algorithm to partly brute-force the problem, which seems
// to work decently well.
Stack commonPrefix;
for (auto&& [slot1, slot2]: ranges::zip_view(_stack1, _stack2))
{
if (!(slot1 == slot2))
break;
commonPrefix.emplace_back(slot1);
}
Stack stack1Tail = _stack1 | ranges::views::drop(commonPrefix.size()) | ranges::to<Stack>;
Stack stack2Tail = _stack2 | ranges::views::drop(commonPrefix.size()) | ranges::to<Stack>;
if (stack1Tail.empty())
return commonPrefix + compressStack(stack2Tail);
if (stack2Tail.empty())
return commonPrefix + compressStack(stack1Tail);
Stack candidate;
for (auto slot: stack1Tail)
if (!util::contains(candidate, slot))
candidate.emplace_back(slot);
for (auto slot: stack2Tail)
if (!util::contains(candidate, slot))
candidate.emplace_back(slot);
cxx20::erase_if(candidate, [](StackSlot const& slot) {
return holds_alternative<LiteralSlot>(slot) || holds_alternative<FunctionCallReturnLabelSlot>(slot);
});
auto evaluate = [&](Stack const& _candidate) -> size_t {
size_t numOps = 0;
Stack testStack = _candidate;
auto swap = [&](unsigned _swapDepth) { ++numOps; if (_swapDepth > 16) numOps += 1000; };
auto dupOrPush = [&](StackSlot const& _slot)
{
if (canBeFreelyGenerated(_slot))
return;
auto depth = util::findOffset(ranges::concat_view(commonPrefix, testStack) | ranges::views::reverse, _slot);
if (depth && *depth >= 16)
numOps += 1000;
};
createStackLayout(testStack, stack1Tail, swap, dupOrPush, [&](){} );
testStack = _candidate;
createStackLayout(testStack, stack2Tail, swap, dupOrPush, [&](){});
return numOps;
};
// See https://en.wikipedia.org/wiki/Heap's_algorithm
size_t n = candidate.size();
Stack bestCandidate = candidate;
size_t bestCost = evaluate(candidate);
std::vector<size_t> c(n, 0);
size_t i = 1;
while (i < n)
{
if (c[i] < i)
{
if (i & 1)
std::swap(candidate.front(), candidate[i]);
else
std::swap(candidate[c[i]], candidate[i]);
size_t cost = evaluate(candidate);
if (cost < bestCost)
{
bestCost = cost;
bestCandidate = candidate;
}
++c[i];
// Note that for a proper implementation of Heap's algorithm this would need to revert back to ``i = 1``.
// However, the incorrect implementation produces decent results and the proper version would have n!
// complexity and is therefore not feasible.
++i;
}
else
{
c[i] = 0;
++i;
}
}
return commonPrefix + bestCandidate;
}
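For reference, a standalone sketch (not part of the diff) of the full iterative Heap's algorithm the comments above refer to; unlike the reduced variant in combineStack, it resets i to 1 after each swap and therefore visits all n! permutations.
// Sketch only: textbook iterative Heap's algorithm (see the Wikipedia link above).
// combineStack deliberately omits the reset of ``i`` to avoid n! runtime.
#include <cstddef>
#include <utility>
#include <vector>
template<typename T, typename Callback>
void forAllPermutations(std::vector<T> _elements, Callback _callback)
{
	std::size_t n = _elements.size();
	std::vector<std::size_t> c(n, 0);
	_callback(_elements);
	std::size_t i = 1;
	while (i < n)
	{
		if (c[i] < i)
		{
			if (i % 2 == 0)
				std::swap(_elements[0], _elements[i]);
			else
				std::swap(_elements[c[i]], _elements[i]);
			_callback(_elements);
			++c[i];
			i = 1; // the reduced version above keeps incrementing i here instead.
		}
		else
		{
			c[i] = 0;
			++i;
		}
	}
}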
Stack StackLayoutGenerator::compressStack(Stack _stack)
{
optional<size_t> firstDupOffset;
do
{
if (firstDupOffset)
{
std::swap(_stack.at(*firstDupOffset), _stack.back());
_stack.pop_back();
firstDupOffset.reset();
}
for (auto&& [depth, slot]: _stack | ranges::views::reverse | ranges::views::enumerate)
if (canBeFreelyGenerated(slot))
{
firstDupOffset = _stack.size() - depth - 1;
break;
}
else if (auto dupDepth = util::findOffset(_stack | ranges::views::reverse | ranges::views::drop(depth + 1), slot))
if (depth + *dupDepth <= 16)
{
firstDupOffset = _stack.size() - depth - 1;
break;
}
}
while (firstDupOffset);
return _stack;
}

View File

@@ -0,0 +1,97 @@
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/**
* Stack layout generator for Yul to EVM code generation.
*/
#pragma once
#include <libyul/backends/evm/ControlFlowGraph.h>
#include <map>
namespace solidity::yul
{
struct StackLayout
{
struct BlockInfo
{
/// Complete stack layout that is required for entering a block.
Stack entryLayout;
/// The resulting stack layout after executing the block.
Stack exitLayout;
};
std::map<CFG::BasicBlock const*, BlockInfo> blockInfos;
/// For each operation the complete stack layout that:
/// - has the slots required for the operation at the stack top.
/// - will have the operation result in a layout that makes it easy to achieve the next desired layout.
std::map<CFG::Operation const*, Stack> operationEntryLayout;
};
class StackLayoutGenerator
{
public:
static StackLayout run(CFG const& _cfg);
private:
StackLayoutGenerator(StackLayout& _context);
/// @returns the optimal entry stack layout, s.t. @a _operation can be applied to it and
/// the result can be transformed to @a _exitStack with minimal stack shuffling.
/// Simultaneously stores the entry layout required for executing the operation in m_layout.
Stack propagateStackThroughOperation(Stack _exitStack, CFG::Operation const& _operation);
/// @returns the desired stack layout at the entry of @a _block, assuming the layout after
/// executing the block should be @a _exitStack.
Stack propagateStackThroughBlock(Stack _exitStack, CFG::BasicBlock const& _block);
/// Main algorithm walking the graph from entry to exit and propagating back the stack layouts to the entries.
/// Iteratively reruns itself along backwards jumps until the layout is stabilized.
void processEntryPoint(CFG::BasicBlock const& _entry);
/// @returns the best known exit layout of @a _block, if all dependencies are already @a _visited.
/// If not, adds the dependencies to @a _dependencyList and returns std::nullopt.
std::optional<Stack> getExitLayoutOrStageDependencies(
CFG::BasicBlock const& _block,
std::set<CFG::BasicBlock const*> const& _visited,
std::list<CFG::BasicBlock const*>& _dependencyList
) const;
/// @returns a pair of ``{jumpingBlock, targetBlock}`` for each backwards jump in the graph starting at @a _entry.
std::list<std::pair<CFG::BasicBlock const*, CFG::BasicBlock const*>> collectBackwardsJumps(CFG::BasicBlock const& _entry) const;
/// After the main algorithm, layouts at conditional jumps are merely compatible, i.e. the exit layout of the
/// jumping block is a superset of the entry layout of the target block. This function modifies the entry layouts
/// of conditional jump targets, s.t. the entry layout of target blocks match the exit layout of the jumping block
/// exactly, except that slots not required after the jump are marked as `JunkSlot`s.
void stitchConditionalJumps(CFG::BasicBlock const& _block);
/// Calculates the ideal stack layout, s.t. both @a _stack1 and @a _stack2 can be achieved with minimal
/// stack shuffling when starting from the returned layout.
static Stack combineStack(Stack const& _stack1, Stack const& _stack2);
/// @returns a copy of @a _stack stripped of all duplicates and slots that can be freely generated.
/// Attempts to create a layout that requires a minimal number of operations to reconstruct the original
/// stack @a _stack.
static Stack compressStack(Stack _stack);
StackLayout& m_layout;
};
}
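A minimal usage sketch (not part of the diff); the build() signature is assumed from its use in StackLayoutGeneratorTest further below.
// Sketch only: builds the control flow graph for an analyzed Yul block and runs the generator.
#include <libyul/backends/evm/ControlFlowGraphBuilder.h>
#include <libyul/backends/evm/StackLayoutGenerator.h>
#include <libyul/AsmAnalysisInfo.h>
#include <libyul/AST.h>
#include <libyul/Dialect.h>
#include <memory>
solidity::yul::StackLayout computeLayout(
	solidity::yul::AsmAnalysisInfo& _analysisInfo,
	solidity::yul::Dialect const& _dialect,
	solidity::yul::Block& _block
)
{
	std::unique_ptr<solidity::yul::CFG> cfg =
		solidity::yul::ControlFlowGraphBuilder::build(_analysisInfo, _dialect, _block);
	// blockInfos holds the entry/exit layouts per basic block; operationEntryLayout
	// the exact stack expected right before each operation.
	return solidity::yul::StackLayoutGenerator::run(*cfg);
}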

View File

@@ -143,6 +143,8 @@ set(libyul_sources
libyul/ObjectCompilerTest.h
libyul/ObjectParser.cpp
libyul/Parser.cpp
libyul/StackLayoutGeneratorTest.cpp
libyul/StackLayoutGeneratorTest.h
libyul/SyntaxTest.h
libyul/SyntaxTest.cpp
libyul/YulInterpreterTest.cpp

View File

@@ -32,6 +32,7 @@
#include <test/libyul/YulInterpreterTest.h>
#include <test/libyul/ObjectCompilerTest.h>
#include <test/libyul/FunctionSideEffects.h>
#include <test/libyul/StackLayoutGeneratorTest.h>
#include <test/libyul/SyntaxTest.h>
#include <boost/filesystem.hpp>
@@ -61,6 +62,7 @@ Testsuite const g_interactiveTestsuites[] = {
{"Yul Interpreter", "libyul", "yulInterpreterTests", false, false, &yul::test::YulInterpreterTest::create},
{"Yul Object Compiler", "libyul", "objectCompiler", false, false, &yul::test::ObjectCompilerTest::create},
{"Yul Control Flow Graph", "libyul", "yulControlFlowGraph", false, false, &yul::test::ControlFlowGraphTest::create},
{"Yul Stack Layout", "libyul", "yulStackLayout", false, false, &yul::test::StackLayoutGeneratorTest::create},
{"Function Side Effects", "libyul", "functionSideEffects", false, false, &yul::test::FunctionSideEffects::create},
{"Yul Syntax", "libyul", "yulSyntaxTests", false, false, &yul::test::SyntaxTest::create},
{"EVM Code Transform", "libyul", "evmCodeTransform", false, false, &yul::test::EVMCodeTransformTest::create, {"nooptions"}},

View File

@@ -22,6 +22,7 @@
#include <libyul/backends/evm/ControlFlowGraph.h>
#include <libyul/backends/evm/ControlFlowGraphBuilder.h>
#include <libyul/backends/evm/StackHelpers.h>
#include <libyul/Object.h>
#include <liblangutil/SourceReferenceFormatter.h>
@@ -52,26 +53,6 @@ ControlFlowGraphTest::ControlFlowGraphTest(string const& _filename):
namespace
{
static std::string stackSlotToString(StackSlot const& _slot)
{
return std::visit(util::GenericVisitor{
[](FunctionCallReturnLabelSlot const& _ret) -> std::string { return "RET[" + _ret.call.get().functionName.name.str() + "]"; },
[](FunctionReturnLabelSlot const&) -> std::string { return "RET"; },
[](VariableSlot const& _var) { return _var.variable.get().name.str(); },
[](LiteralSlot const& _lit) { return util::toCompactHexWithPrefix(_lit.value); },
[](TemporarySlot const& _tmp) -> std::string { return "TMP[" + _tmp.call.get().functionName.name.str() + ", " + std::to_string(_tmp.index) + "]"; },
[](JunkSlot const&) -> std::string { return "JUNK"; }
}, _slot);
}
static std::string stackToString(Stack const& _stack)
{
std::string result("[ ");
for (auto const& slot: _stack)
result += stackSlotToString(slot) + ' ';
result += ']';
return result;
}
static std::string variableSlotToString(VariableSlot const& _slot)
{
return _slot.variable.get().name.str();

View File

@@ -0,0 +1,267 @@
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#include <test/libyul/StackLayoutGeneratorTest.h>
#include <test/libyul/Common.h>
#include <test/Common.h>
#include <libyul/backends/evm/ControlFlowGraph.h>
#include <libyul/backends/evm/ControlFlowGraphBuilder.h>
#include <libyul/backends/evm/StackHelpers.h>
#include <libyul/backends/evm/StackLayoutGenerator.h>
#include <libyul/Object.h>
#include <liblangutil/SourceReferenceFormatter.h>
#include <libsolutil/AnsiColorized.h>
#include <libsolutil/Visitor.h>
#include <range/v3/view/reverse.hpp>
#ifdef ISOLTEST
#include <boost/process.hpp>
#endif
using namespace solidity;
using namespace solidity::util;
using namespace solidity::langutil;
using namespace solidity::yul;
using namespace solidity::yul::test;
using namespace solidity::frontend;
using namespace solidity::frontend::test;
using namespace std;
StackLayoutGeneratorTest::StackLayoutGeneratorTest(string const& _filename):
TestCase(_filename)
{
m_source = m_reader.source();
auto dialectName = m_reader.stringSetting("dialect", "evm");
m_dialect = &dialect(dialectName, solidity::test::CommonOptions::get().evmVersion());
m_expectation = m_reader.simpleExpectations();
}
namespace
{
static std::string variableSlotToString(VariableSlot const& _slot)
{
return _slot.variable.get().name.str();
}
}
class StackLayoutPrinter
{
public:
StackLayoutPrinter(std::ostream& _stream, StackLayout const& _stackLayout):
m_stream(_stream), m_stackLayout(_stackLayout)
{
}
void operator()(CFG::BasicBlock const& _block, bool _isMainEntry = true)
{
if (_isMainEntry)
{
m_stream << "Entry [label=\"Entry\"];\n";
m_stream << "Entry -> Block" << getBlockId(_block) << ";\n";
}
while (!m_blocksToPrint.empty())
{
CFG::BasicBlock const* block = *m_blocksToPrint.begin();
m_blocksToPrint.erase(m_blocksToPrint.begin());
printBlock(*block);
}
}
void operator()(
CFG::FunctionInfo const& _info
)
{
m_stream << "FunctionEntry_" << _info.function.name.str() << " [label=\"";
m_stream << "function " << _info.function.name.str() << "(";
m_stream << joinHumanReadable(_info.parameters | ranges::views::transform(variableSlotToString));
m_stream << ")";
if (!_info.returnVariables.empty())
{
m_stream << " -> ";
m_stream << joinHumanReadable(_info.returnVariables | ranges::views::transform(variableSlotToString));
}
m_stream << "\\l\\\n";
Stack functionEntryStack = {FunctionReturnLabelSlot{_info.function}};
functionEntryStack += _info.parameters | ranges::views::reverse;
m_stream << stackToString(functionEntryStack) << "\"];\n";
m_stream << "FunctionEntry_" << _info.function.name.str() << " -> Block" << getBlockId(*_info.entry) << ";\n";
(*this)(*_info.entry, false);
}
private:
void printBlock(CFG::BasicBlock const& _block)
{
m_stream << "Block" << getBlockId(_block) << " [label=\"\\\n";
// Verify that the entries of this block exit into this block.
for (auto const& entry: _block.entries)
std::visit(util::GenericVisitor{
[&](CFG::BasicBlock::Jump const& _jump)
{
soltestAssert(_jump.target == &_block, "Invalid control flow graph.");
},
[&](CFG::BasicBlock::ConditionalJump const& _conditionalJump)
{
soltestAssert(
_conditionalJump.zero == &_block || _conditionalJump.nonZero == &_block,
"Invalid control flow graph."
);
},
[&](auto const&)
{
soltestAssert(false, "Invalid control flow graph.");
}
}, entry->exit);
auto const& blockInfo = m_stackLayout.blockInfos.at(&_block);
m_stream << stackToString(blockInfo.entryLayout) << "\\l\\\n";
for (auto const& operation: _block.operations)
{
auto entryLayout = m_stackLayout.operationEntryLayout.at(&operation);
m_stream << stackToString(m_stackLayout.operationEntryLayout.at(&operation)) << "\\l\\\n";
std::visit(util::GenericVisitor{
[&](CFG::FunctionCall const& _call) {
m_stream << _call.function.get().name.str();
},
[&](CFG::BuiltinCall const& _call) {
m_stream << _call.functionCall.get().functionName.name.str();
},
[&](CFG::Assignment const& _assignment) {
m_stream << "Assignment(";
m_stream << joinHumanReadable(_assignment.variables | ranges::views::transform(variableSlotToString));
m_stream << ")";
}
}, operation.operation);
m_stream << "\\l\\\n";
soltestAssert(operation.input.size() <= entryLayout.size(), "Invalid Stack Layout.");
for (size_t i = 0; i < operation.input.size(); ++i)
entryLayout.pop_back();
entryLayout += operation.output;
m_stream << stackToString(entryLayout) << "\\l\\\n";
}
m_stream << stackToString(blockInfo.exitLayout) << "\\l\\\n";
m_stream << "\"];\n";
std::visit(util::GenericVisitor{
[&](CFG::BasicBlock::MainExit const&)
{
m_stream << "Block" << getBlockId(_block) << "Exit [label=\"MainExit\"];\n";
m_stream << "Block" << getBlockId(_block) << " -> Block" << getBlockId(_block) << "Exit;\n";
},
[&](CFG::BasicBlock::Jump const& _jump)
{
m_stream << "Block" << getBlockId(_block) << " -> Block" << getBlockId(_block) << "Exit [arrowhead=none];\n";
m_stream << "Block" << getBlockId(_block) << "Exit [label=\"";
if (_jump.backwards)
m_stream << "Backwards";
m_stream << "Jump\" shape=oval];\n";
m_stream << "Block" << getBlockId(_block) << "Exit -> Block" << getBlockId(*_jump.target) << ";\n";
},
[&](CFG::BasicBlock::ConditionalJump const& _conditionalJump)
{
m_stream << "Block" << getBlockId(_block) << " -> Block" << getBlockId(_block) << "Exit;\n";
m_stream << "Block" << getBlockId(_block) << "Exit [label=\"{ ";
m_stream << stackSlotToString(_conditionalJump.condition);
m_stream << "| { <0> Zero | <1> NonZero }}\" shape=Mrecord];\n";
m_stream << "Block" << getBlockId(_block);
m_stream << "Exit:0 -> Block" << getBlockId(*_conditionalJump.zero) << ";\n";
m_stream << "Block" << getBlockId(_block);
m_stream << "Exit:1 -> Block" << getBlockId(*_conditionalJump.nonZero) << ";\n";
},
[&](CFG::BasicBlock::FunctionReturn const& _return)
{
m_stream << "Block" << getBlockId(_block) << "Exit [label=\"FunctionReturn[" << _return.info->function.name.str() << "]\"];\n";
m_stream << "Block" << getBlockId(_block) << " -> Block" << getBlockId(_block) << "Exit;\n";
},
[&](CFG::BasicBlock::Terminated const&)
{
m_stream << "Block" << getBlockId(_block) << "Exit [label=\"Terminated\"];\n";
m_stream << "Block" << getBlockId(_block) << " -> Block" << getBlockId(_block) << "Exit;\n";
}
}, _block.exit);
m_stream << "\n";
}
size_t getBlockId(CFG::BasicBlock const& _block)
{
if (size_t* id = util::valueOrNullptr(m_blockIds, &_block))
return *id;
size_t id = m_blockIds[&_block] = m_blockCount++;
m_blocksToPrint.emplace_back(&_block);
return id;
}
std::ostream& m_stream;
StackLayout const& m_stackLayout;
std::map<CFG::BasicBlock const*, size_t> m_blockIds;
size_t m_blockCount = 0;
std::list<CFG::BasicBlock const*> m_blocksToPrint;
};
TestCase::TestResult StackLayoutGeneratorTest::run(ostream& _stream, string const& _linePrefix, bool const _formatted)
{
ErrorList errors;
auto [object, analysisInfo] = parse(m_source, *m_dialect, errors);
if (!object || !analysisInfo || !Error::containsOnlyWarnings(errors))
{
AnsiColorized(_stream, _formatted, {formatting::BOLD, formatting::RED}) << _linePrefix << "Error parsing source." << endl;
return TestResult::FatalError;
}
std::ostringstream output;
std::unique_ptr<CFG> cfg = ControlFlowGraphBuilder::build(*analysisInfo, *m_dialect, *object->code);
StackLayout stackLayout = StackLayoutGenerator::run(*cfg);
output << "digraph CFG {\nnodesep=0.7;\nnode[shape=box];\n\n";
StackLayoutPrinter printer{output, stackLayout};
printer(*cfg->entry);
for (auto function: cfg->functions)
printer(cfg->functionInfo.at(function));
output << "}\n";
m_obtainedResult = output.str();
auto result = checkResult(_stream, _linePrefix, _formatted);
#ifdef ISOLTEST
char* graphDisplayer = nullptr;
if (result == TestResult::Failure)
graphDisplayer = getenv("ISOLTEST_DISPLAY_GRAPHS_FAILURE");
else if (result == TestResult::Success)
graphDisplayer = getenv("ISOLTEST_DISPLAY_GRAPHS_SUCCESS");
if (graphDisplayer)
{
if (result == TestResult::Success)
std::cout << std::endl << m_source << std::endl;
boost::process::opstream pipe;
boost::process::child child(graphDisplayer, boost::process::std_in < pipe);
pipe << output.str();
pipe.flush();
pipe.pipe().close();
if (result == TestResult::Success)
child.wait();
else
child.detach();
}
#endif
return result;
}

View File

@ -0,0 +1,43 @@
/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
#pragma once
#include <test/TestCase.h>
namespace solidity::yul
{
struct Dialect;
namespace test
{
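/// Test case that parses a Yul source, builds its control flow graph, runs the stack layout
/// generator on it and compares the resulting annotated dot graph against the expected output.
/// Test files follow the usual isoltest layout, e.g. (illustrative sketch only, not a test from this commit):
///
///   { let x := calldataload(0) sstore(x, x) }
///   // ----
///   // digraph CFG {
///   // ...
///   // }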
class StackLayoutGeneratorTest: public solidity::frontend::test::TestCase
{
public:
static std::unique_ptr<TestCase> create(Config const& _config)
{
return std::make_unique<StackLayoutGeneratorTest>(_config.filename);
}
explicit StackLayoutGeneratorTest(std::string const& _filename);
TestResult run(std::ostream& _stream, std::string const& _linePrefix = "", bool const _formatted = false) override;
private:
Dialect const* m_dialect = nullptr;
};
}
}

View File

@ -0,0 +1,318 @@
{
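// Exercises complex nested control flow: a for loop whose body contains a switch using break, leave,
// revert and return across its cases and whose post block contains an if that leaves the function early.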
function f(a, b) -> c {
for { let x := 42 } lt(x, a) {
x := add(x, 1)
if calldataload(x)
{
sstore(0, x)
c := 0x21
leave
sstore(0x01, 0x0101)
}
sstore(0xFF, 0xFFFF)
}
{
switch mload(x)
case 0 {
sstore(a, b)
break
sstore(a, b)
}
case 1 {
sstore(0x04, x)
leave
sstore(a, 0x0505)
}
case 2 {
sstore(x, 0x06)
c := 42
revert(0, 0)
sstore(0x07, 0x0707)
}
case 3 {
sstore(0x08, 0x0808)
}
default {
if mload(b) {
return(0, 0)
sstore(0x09, 0x0909)
}
sstore(0x0A, 0x0A0A)
}
sstore(0x0B, 0x0B0B)
}
sstore(0x0C, 0x0C0C)
if sload(0x0D) {
c := 0x424242
}
}
pop(f(1,2))
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ RET[f] 0x02 0x01 ]\l\
// f\l\
// [ TMP[f, 0] ]\l\
// [ TMP[f, 0] ]\l\
// pop\l\
// [ ]\l\
// [ ]\l\
// "];
// Block0Exit [label="MainExit"];
// Block0 -> Block0Exit;
//
// FunctionEntry_f [label="function f(a, b) -> c\l\
// [ RET b a ]"];
// FunctionEntry_f -> Block1;
// Block1 [label="\
// [ c RET a b ]\l\
// [ c RET a b 0x2a ]\l\
// Assignment(x)\l\
// [ c RET a b x ]\l\
// [ c RET a b x ]\l\
// "];
// Block1 -> Block1Exit [arrowhead=none];
// Block1Exit [label="Jump" shape=oval];
// Block1Exit -> Block2;
//
// Block2 [label="\
// [ c RET a b x ]\l\
// [ c RET a b x a x ]\l\
// lt\l\
// [ c RET a b x TMP[lt, 0] ]\l\
// [ c RET a b x TMP[lt, 0] ]\l\
// "];
// Block2 -> Block2Exit;
// Block2Exit [label="{ TMP[lt, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block2Exit:0 -> Block3;
// Block2Exit:1 -> Block4;
//
// Block3 [label="\
// [ c RET JUNK JUNK JUNK ]\l\
// [ c RET 0x0c0c 0x0c ]\l\
// sstore\l\
// [ c RET ]\l\
// [ c RET 0x0d ]\l\
// sload\l\
// [ c RET TMP[sload, 0] ]\l\
// [ c RET TMP[sload, 0] ]\l\
// "];
// Block3 -> Block3Exit;
// Block3Exit [label="{ TMP[sload, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block3Exit:0 -> Block5;
// Block3Exit:1 -> Block6;
//
// Block4 [label="\
// [ c RET a b x ]\l\
// [ c RET a b x x ]\l\
// mload\l\
// [ c RET a b x TMP[mload, 0] ]\l\
// [ c RET a b x TMP[mload, 0] ]\l\
// Assignment(GHOST[0])\l\
// [ c RET a b x GHOST[0] ]\l\
// [ c RET a b x GHOST[0] GHOST[0] 0x00 ]\l\
// eq\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// "];
// Block4 -> Block4Exit;
// Block4Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block4Exit:0 -> Block7;
// Block4Exit:1 -> Block8;
//
// Block5 [label="\
// [ c RET ]\l\
// [ c RET ]\l\
// "];
// Block5Exit [label="FunctionReturn[f]"];
// Block5 -> Block5Exit;
//
// Block6 [label="\
// [ JUNK RET ]\l\
// [ RET 0x424242 ]\l\
// Assignment(c)\l\
// [ RET c ]\l\
// [ c RET ]\l\
// "];
// Block6 -> Block6Exit [arrowhead=none];
// Block6Exit [label="Jump" shape=oval];
// Block6Exit -> Block5;
//
// Block7 [label="\
// [ c RET a b x GHOST[0] ]\l\
// [ c RET a b x GHOST[0] GHOST[0] 0x01 ]\l\
// eq\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// "];
// Block7 -> Block7Exit;
// Block7Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block7Exit:0 -> Block9;
// Block7Exit:1 -> Block10;
//
// Block8 [label="\
// [ c RET a b JUNK JUNK ]\l\
// [ c RET b a ]\l\
// sstore\l\
// [ c RET ]\l\
// [ c RET ]\l\
// "];
// Block8 -> Block8Exit [arrowhead=none];
// Block8Exit [label="Jump" shape=oval];
// Block8Exit -> Block3;
//
// Block9 [label="\
// [ c RET a b x GHOST[0] ]\l\
// [ c RET a b x GHOST[0] GHOST[0] 0x02 ]\l\
// eq\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// [ c RET a b x GHOST[0] TMP[eq, 0] ]\l\
// "];
// Block9 -> Block9Exit;
// Block9Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block9Exit:0 -> Block11;
// Block9Exit:1 -> Block12;
//
// Block10 [label="\
// [ c RET JUNK JUNK x JUNK ]\l\
// [ c RET x 0x04 ]\l\
// sstore\l\
// [ c RET ]\l\
// [ c RET ]\l\
// "];
// Block10Exit [label="FunctionReturn[f]"];
// Block10 -> Block10Exit;
//
// Block11 [label="\
// [ c RET a b x GHOST[0] ]\l\
// [ c RET a b x GHOST[0] 0x03 ]\l\
// eq\l\
// [ c RET a b x TMP[eq, 0] ]\l\
// [ c RET a b x TMP[eq, 0] ]\l\
// "];
// Block11 -> Block11Exit;
// Block11Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block11Exit:0 -> Block13;
// Block11Exit:1 -> Block14;
//
// Block12 [label="\
// [ JUNK JUNK JUNK JUNK x JUNK ]\l\
// [ 0x06 x ]\l\
// sstore\l\
// [ ]\l\
// [ 0x2a ]\l\
// Assignment(c)\l\
// [ c ]\l\
// [ 0x00 0x00 ]\l\
// revert\l\
// [ ]\l\
// [ ]\l\
// "];
// Block12Exit [label="Terminated"];
// Block12 -> Block12Exit;
//
// Block13 [label="\
// [ c RET a b x ]\l\
// [ c RET a b x b ]\l\
// mload\l\
// [ c RET a b x TMP[mload, 0] ]\l\
// [ c RET a b x TMP[mload, 0] ]\l\
// "];
// Block13 -> Block13Exit;
// Block13Exit [label="{ TMP[mload, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block13Exit:0 -> Block15;
// Block13Exit:1 -> Block16;
//
// Block14 [label="\
// [ c RET a b x ]\l\
// [ c RET a b 0x01 x 0x0808 0x08 ]\l\
// sstore\l\
// [ c RET a b 0x01 x ]\l\
// [ c RET a b 0x01 x ]\l\
// "];
// Block14 -> Block14Exit [arrowhead=none];
// Block14Exit [label="Jump" shape=oval];
// Block14Exit -> Block17;
//
// Block15 [label="\
// [ c RET a b x ]\l\
// [ c RET a b 0x01 x 0x0a0a 0x0a ]\l\
// sstore\l\
// [ c RET a b 0x01 x ]\l\
// [ c RET a b 0x01 x ]\l\
// "];
// Block15 -> Block15Exit [arrowhead=none];
// Block15Exit [label="Jump" shape=oval];
// Block15Exit -> Block17;
//
// Block16 [label="\
// [ JUNK JUNK JUNK JUNK JUNK ]\l\
// [ 0x00 0x00 ]\l\
// return\l\
// [ ]\l\
// [ ]\l\
// "];
// Block16Exit [label="Terminated"];
// Block16 -> Block16Exit;
//
// Block17 [label="\
// [ c RET a b 0x01 x ]\l\
// [ c RET a b 0x01 x 0x0b0b 0x0b ]\l\
// sstore\l\
// [ c RET a b 0x01 x ]\l\
// [ c RET a b 0x01 x ]\l\
// "];
// Block17 -> Block17Exit [arrowhead=none];
// Block17Exit [label="Jump" shape=oval];
// Block17Exit -> Block18;
//
// Block18 [label="\
// [ c RET a b 0x01 x ]\l\
// [ c RET a b 0x01 x ]\l\
// add\l\
// [ c RET a b TMP[add, 0] ]\l\
// [ c RET a b TMP[add, 0] ]\l\
// Assignment(x)\l\
// [ c RET a b x ]\l\
// [ c RET x b a x ]\l\
// calldataload\l\
// [ c RET x b a TMP[calldataload, 0] ]\l\
// [ c RET x b a TMP[calldataload, 0] ]\l\
// "];
// Block18 -> Block18Exit;
// Block18Exit [label="{ TMP[calldataload, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block18Exit:0 -> Block19;
// Block18Exit:1 -> Block20;
//
// Block19 [label="\
// [ c RET x b a ]\l\
// [ c RET x b a 0xffff 0xff ]\l\
// sstore\l\
// [ c RET x b a ]\l\
// [ c RET x b a ]\l\
// "];
// Block19 -> Block19Exit [arrowhead=none];
// Block19Exit [label="BackwardsJump" shape=oval];
// Block19Exit -> Block2;
//
// Block20 [label="\
// [ JUNK RET x JUNK JUNK ]\l\
// [ RET x 0x00 ]\l\
// sstore\l\
// [ RET ]\l\
// [ RET 0x21 ]\l\
// Assignment(c)\l\
// [ RET c ]\l\
// [ c RET ]\l\
// "];
// Block20Exit [label="FunctionReturn[f]"];
// Block20 -> Block20Exit;
//
// }

View File

@ -0,0 +1,91 @@
{
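// Exercises a plain for loop with init, condition and post blocks plus code before and after the loop.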
let x := 0x01
let y := 0x02
sstore(0x01, x)
for { sstore(0x02, 0x0202) } lt(x, 0x0303) { x := add(x,0x0404) } {
sstore(0x05, 0x0505)
y := sload(x)
}
sstore(0x06, 0x0506)
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ 0x01 ]\l\
// Assignment(x)\l\
// [ x ]\l\
// [ x 0x02 ]\l\
// Assignment(y)\l\
// [ x y ]\l\
// [ x x 0x01 ]\l\
// sstore\l\
// [ x ]\l\
// [ x 0x0202 0x02 ]\l\
// sstore\l\
// [ x ]\l\
// [ x ]\l\
// "];
// Block0 -> Block0Exit [arrowhead=none];
// Block0Exit [label="Jump" shape=oval];
// Block0Exit -> Block1;
//
// Block1 [label="\
// [ x ]\l\
// [ x 0x0303 x ]\l\
// lt\l\
// [ x TMP[lt, 0] ]\l\
// [ x TMP[lt, 0] ]\l\
// "];
// Block1 -> Block1Exit;
// Block1Exit [label="{ TMP[lt, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block1Exit:0 -> Block2;
// Block1Exit:1 -> Block3;
//
// Block2 [label="\
// [ JUNK ]\l\
// [ 0x0506 0x06 ]\l\
// sstore\l\
// [ ]\l\
// [ ]\l\
// "];
// Block2Exit [label="MainExit"];
// Block2 -> Block2Exit;
//
// Block3 [label="\
// [ x ]\l\
// [ 0x0404 x 0x0505 0x05 ]\l\
// sstore\l\
// [ 0x0404 x ]\l\
// [ 0x0404 x x ]\l\
// sload\l\
// [ 0x0404 x TMP[sload, 0] ]\l\
// [ 0x0404 x TMP[sload, 0] ]\l\
// Assignment(y)\l\
// [ 0x0404 x y ]\l\
// [ 0x0404 x ]\l\
// "];
// Block3 -> Block3Exit [arrowhead=none];
// Block3Exit [label="Jump" shape=oval];
// Block3Exit -> Block4;
//
// Block4 [label="\
// [ 0x0404 x ]\l\
// [ 0x0404 x ]\l\
// add\l\
// [ TMP[add, 0] ]\l\
// [ TMP[add, 0] ]\l\
// Assignment(x)\l\
// [ x ]\l\
// [ x ]\l\
// "];
// Block4 -> Block4Exit [arrowhead=none];
// Block4Exit [label="BackwardsJump" shape=oval];
// Block4Exit -> Block1;
//
// }

View File

@ -0,0 +1,117 @@
{
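// Exercises function calls: a recursive call chain (h calls f and itself), a parameterless function
// and a function returning multiple values.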
function f(a, b) -> r {
let x := add(a,b)
r := sub(x,a)
}
function g() {
sstore(0x01, 0x0101)
}
function h(x) {
h(f(x, 0))
g()
}
function i() -> v, w {
v := 0x0202
w := 0x0303
}
let x, y := i()
h(x)
h(y)
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ RET[h] RET[h] RET[i] ]\l\
// i\l\
// [ RET[h] RET[h] TMP[i, 0] TMP[i, 1] ]\l\
// [ RET[h] RET[h] TMP[i, 0] TMP[i, 1] ]\l\
// Assignment(x, y)\l\
// [ RET[h] RET[h] x y ]\l\
// [ RET[h] y RET[h] x ]\l\
// h\l\
// [ RET[h] y ]\l\
// [ RET[h] y ]\l\
// h\l\
// [ ]\l\
// [ ]\l\
// "];
// Block0Exit [label="MainExit"];
// Block0 -> Block0Exit;
//
// FunctionEntry_f [label="function f(a, b) -> r\l\
// [ RET b a ]"];
// FunctionEntry_f -> Block1;
// Block1 [label="\
// [ RET a b ]\l\
// [ RET a b a ]\l\
// add\l\
// [ RET a TMP[add, 0] ]\l\
// [ RET a TMP[add, 0] ]\l\
// Assignment(x)\l\
// [ RET a x ]\l\
// [ RET a x ]\l\
// sub\l\
// [ RET TMP[sub, 0] ]\l\
// [ RET TMP[sub, 0] ]\l\
// Assignment(r)\l\
// [ RET r ]\l\
// [ r RET ]\l\
// "];
// Block1Exit [label="FunctionReturn[f]"];
// Block1 -> Block1Exit;
//
// FunctionEntry_g [label="function g()\l\
// [ RET ]"];
// FunctionEntry_g -> Block2;
// Block2 [label="\
// [ RET ]\l\
// [ RET 0x0101 0x01 ]\l\
// sstore\l\
// [ RET ]\l\
// [ RET ]\l\
// "];
// Block2Exit [label="FunctionReturn[g]"];
// Block2 -> Block2Exit;
//
// FunctionEntry_h [label="function h(x)\l\
// [ RET x ]"];
// FunctionEntry_h -> Block3;
// Block3 [label="\
// [ RET RET[h] RET[f] 0x00 x ]\l\
// [ RET RET[h] RET[f] 0x00 x ]\l\
// f\l\
// [ RET RET[h] TMP[f, 0] ]\l\
// [ RET RET[h] TMP[f, 0] ]\l\
// h\l\
// [ RET ]\l\
// [ RET RET[g] ]\l\
// g\l\
// [ RET ]\l\
// [ RET ]\l\
// "];
// Block3Exit [label="FunctionReturn[h]"];
// Block3 -> Block3Exit;
//
// FunctionEntry_i [label="function i() -> v, w\l\
// [ RET ]"];
// FunctionEntry_i -> Block4;
// Block4 [label="\
// [ RET ]\l\
// [ RET 0x0202 ]\l\
// Assignment(v)\l\
// [ RET v ]\l\
// [ v RET 0x0303 ]\l\
// Assignment(w)\l\
// [ v RET w ]\l\
// [ v w RET ]\l\
// "];
// Block4Exit [label="FunctionReturn[i]"];
// Block4 -> Block4Exit;
//
// }

View File

@ -0,0 +1,51 @@
{
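// Exercises a simple if statement between two unconditional stores.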
sstore(0x01, 0x0101)
if calldataload(0) {
sstore(0x02, 0x0202)
}
sstore(0x03, 0x003)
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ 0x0101 0x01 ]\l\
// sstore\l\
// [ ]\l\
// [ 0x00 ]\l\
// calldataload\l\
// [ TMP[calldataload, 0] ]\l\
// [ TMP[calldataload, 0] ]\l\
// "];
// Block0 -> Block0Exit;
// Block0Exit [label="{ TMP[calldataload, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block0Exit:0 -> Block1;
// Block0Exit:1 -> Block2;
//
// Block1 [label="\
// [ ]\l\
// [ 0x03 0x03 ]\l\
// sstore\l\
// [ ]\l\
// [ ]\l\
// "];
// Block1Exit [label="MainExit"];
// Block1 -> Block1Exit;
//
// Block2 [label="\
// [ ]\l\
// [ 0x0202 0x02 ]\l\
// sstore\l\
// [ ]\l\
// [ ]\l\
// "];
// Block2 -> Block2Exit [arrowhead=none];
// Block2Exit [label="Jump" shape=oval];
// Block2Exit -> Block1;
//
// }

View File

@ -0,0 +1,17 @@
{
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ ]\l\
// "];
// Block0Exit [label="MainExit"];
// Block0 -> Block0Exit;
//
// }

View File

@ -0,0 +1,107 @@
{
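// Exercises a switch with two cases and a default branch, followed by code that still uses one of the variables.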
let x := 0x0101
let y := 0x0202
let z := 0x0303
switch sload(x)
case 0 {
x := 0x42
}
case 1 {
y := 0x42
}
default {
sstore(z, z)
}
sstore(0x0404, y)
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ 0x0101 ]\l\
// Assignment(x)\l\
// [ x ]\l\
// [ x 0x0202 ]\l\
// Assignment(y)\l\
// [ x y ]\l\
// [ y x 0x0303 ]\l\
// Assignment(z)\l\
// [ y x z ]\l\
// [ y z x ]\l\
// sload\l\
// [ y z TMP[sload, 0] ]\l\
// [ y z TMP[sload, 0] ]\l\
// Assignment(GHOST[0])\l\
// [ y z GHOST[0] ]\l\
// [ y z GHOST[0] GHOST[0] 0x00 ]\l\
// eq\l\
// [ y z GHOST[0] TMP[eq, 0] ]\l\
// [ y z GHOST[0] TMP[eq, 0] ]\l\
// "];
// Block0 -> Block0Exit;
// Block0Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block0Exit:0 -> Block1;
// Block0Exit:1 -> Block2;
//
// Block1 [label="\
// [ y z GHOST[0] ]\l\
// [ y z GHOST[0] 0x01 ]\l\
// eq\l\
// [ y z TMP[eq, 0] ]\l\
// [ y z TMP[eq, 0] ]\l\
// "];
// Block1 -> Block1Exit;
// Block1Exit [label="{ TMP[eq, 0]| { <0> Zero | <1> NonZero }}" shape=Mrecord];
// Block1Exit:0 -> Block3;
// Block1Exit:1 -> Block4;
//
// Block2 [label="\
// [ y JUNK JUNK ]\l\
// [ y 0x42 ]\l\
// Assignment(x)\l\
// [ y x ]\l\
// [ y ]\l\
// "];
// Block2 -> Block2Exit [arrowhead=none];
// Block2Exit [label="Jump" shape=oval];
// Block2Exit -> Block5;
//
// Block3 [label="\
// [ y z ]\l\
// [ y z z ]\l\
// sstore\l\
// [ y ]\l\
// [ y ]\l\
// "];
// Block3 -> Block3Exit [arrowhead=none];
// Block3Exit [label="Jump" shape=oval];
// Block3Exit -> Block5;
//
// Block4 [label="\
// [ JUNK JUNK ]\l\
// [ 0x42 ]\l\
// Assignment(y)\l\
// [ y ]\l\
// [ y ]\l\
// "];
// Block4 -> Block4Exit [arrowhead=none];
// Block4Exit [label="Jump" shape=oval];
// Block4Exit -> Block5;
//
// Block5 [label="\
// [ y ]\l\
// [ y 0x0404 ]\l\
// sstore\l\
// [ ]\l\
// [ ]\l\
// "];
// Block5Exit [label="MainExit"];
// Block5 -> Block5Exit;
//
// }

View File

@ -0,0 +1,51 @@
{
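// Exercises repeated reassignment of local variables within a single block.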
let x := calldataload(0)
let y := calldataload(2)
x := calldataload(3)
y := calldataload(4)
sstore(x,y)
}
// ----
// digraph CFG {
// nodesep=0.7;
// node[shape=box];
//
// Entry [label="Entry"];
// Entry -> Block0;
// Block0 [label="\
// [ ]\l\
// [ 0x00 ]\l\
// calldataload\l\
// [ TMP[calldataload, 0] ]\l\
// [ TMP[calldataload, 0] ]\l\
// Assignment(x)\l\
// [ x ]\l\
// [ 0x02 ]\l\
// calldataload\l\
// [ TMP[calldataload, 0] ]\l\
// [ TMP[calldataload, 0] ]\l\
// Assignment(y)\l\
// [ y ]\l\
// [ 0x03 ]\l\
// calldataload\l\
// [ TMP[calldataload, 0] ]\l\
// [ TMP[calldataload, 0] ]\l\
// Assignment(x)\l\
// [ x ]\l\
// [ x 0x04 ]\l\
// calldataload\l\
// [ x TMP[calldataload, 0] ]\l\
// [ x TMP[calldataload, 0] ]\l\
// Assignment(y)\l\
// [ x y ]\l\
// [ y x ]\l\
// sstore\l\
// [ ]\l\
// [ ]\l\
// "];
// Block0Exit [label="MainExit"];
// Block0 -> Block0Exit;
//
// }

View File

@ -38,6 +38,7 @@ add_executable(isoltest
../libyul/FunctionSideEffects.cpp
../libyul/ObjectCompilerTest.cpp
../libyul/SyntaxTest.cpp
../libyul/StackLayoutGeneratorTest.cpp
../libyul/YulOptimizerTest.cpp
../libyul/YulOptimizerTestCommon.cpp
../libyul/YulInterpreterTest.cpp