Add swarm hash to the end of the bytecode.

chriseth 2016-11-24 10:32:52 +01:00
parent 36c6fe2b69
commit 91ecc4533d
8 changed files with 20 additions and 23 deletions

View File

@@ -242,14 +242,10 @@ It can be used to query the compiler version, the sourcecode, the ABI
and NatSpec documentation in order to more safely interact with the contract
and to verify its source code.
-The compiler inserts a swarm hash of that file into the bytecode of each
+The compiler appends a swarm hash (32 bytes) of that file to the end of the bytecode of each
contract, so that you can retrieve the file in an authenticated way
without having to resort to a centralized data provider.
-Specifically, the runtime code for a contract always starts with
-``push32 <metadata hash> pop``, so you can take a look at the 32 bytes starting at
-the second byte of the code of a contract.
Of course, you have to publish the metadata file to swarm (or some other service)
so that others can access it. The file can be output by using ``solc --metadata``.
It will contain swarm references to the source code, so you have to upload
@@ -326,8 +322,7 @@ Usage for Automatic Interface Generation and NatSpec
The metadata is used in the following way: A component that wants to interact
with a contract (e.g. mist) retrieves the code of the contract
-and from that the first 33 bytes. If the first byte decodes into a PUSH32
-instruction, the other 32 bytes are interpreted as the swarm hash of
+and from that the last 32 bytes, which are interpreted as the swarm hash of
a file which is then retrieved.
That file is JSON-decoded into a structure like above.

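The following sketch (not part of this commit) illustrates the consumer side described in the documentation change above: given a contract's runtime bytecode, the trailing 32 bytes are read as the swarm hash of the metadata file. The helper name and the use of std::vector<uint8_t> are illustrative assumptions, not code from the repository.

#include <algorithm>
#include <array>
#include <cstdint>
#include <stdexcept>
#include <vector>

using bytes = std::vector<uint8_t>;

// Illustrative helper: extract the metadata swarm hash that the compiler
// now appends as the last 32 bytes of the runtime bytecode.
std::array<uint8_t, 32> metadataSwarmHash(bytes const& _runtimeCode)
{
    if (_runtimeCode.size() < 32)
        throw std::runtime_error("Bytecode too short to contain a metadata hash.");
    std::array<uint8_t, 32> hash{};
    std::copy(_runtimeCode.end() - 32, _runtimeCode.end(), hash.begin());
    return hash;
}

The hash can then be used to fetch the metadata JSON from swarm and decode it as described above.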
View File

@@ -432,7 +432,7 @@ LinkerObject const& Assembly::assemble() const
unsigned bytesPerTag = dev::bytesRequired(bytesRequiredForCode);
byte tagPush = (byte)Instruction::PUSH1 - 1 + bytesPerTag;
-unsigned bytesRequiredIncludingData = bytesRequiredForCode + 1;
+unsigned bytesRequiredIncludingData = bytesRequiredForCode + 1 + m_auxiliaryData.size();
for (auto const& sub: m_subs)
bytesRequiredIncludingData += sub->assemble().bytecode.size();
@@ -525,8 +525,9 @@ LinkerObject const& Assembly::assemble() const
}
}
-if (!dataRef.empty() && !subRef.empty())
-ret.bytecode.push_back(0);
+// Append a STOP just to be sure.
+ret.bytecode.push_back(0);
for (size_t i = 0; i < m_subs.size(); ++i)
{
auto references = subRef.equal_range(i);
@@ -568,6 +569,9 @@ LinkerObject const& Assembly::assemble() const
}
ret.bytecode += dataItem.second;
}
+ret.bytecode += m_auxiliaryData;
for (unsigned pos: sizeRef)
{
bytesRef r(ret.bytecode.data() + pos, bytesPerDataRef);

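As a rough standalone model (an illustrative sketch under simplifying assumptions, not the real libevmasm implementation) of what the assemble() changes above produce: the tail of the assembled bytecode now consists of a terminating STOP, the data sections, and finally the auxiliary data, so the appended bytes are never reachable as code.

#include <cassert>
#include <cstdint>
#include <vector>

using bytes = std::vector<uint8_t>;

// Toy stand-in for Assembly::assemble(); only the tail layout is modelled.
struct MiniAssembly
{
    bytes code;          // assembled instruction stream
    bytes data;          // stand-in for sub-assemblies and data items
    bytes auxiliaryData; // e.g. the 32-byte metadata swarm hash

    bytes assemble() const
    {
        bytes ret = code;
        ret.push_back(0x00); // STOP appended "just to be sure"
        ret.insert(ret.end(), data.begin(), data.end());
        ret.insert(ret.end(), auxiliaryData.begin(), auxiliaryData.end());
        return ret;
    }
};

int main()
{
    MiniAssembly a{{0x60, 0x80}, {}, bytes(32, 0xaa)};
    bytes out = a.assemble();
    // The auxiliary data occupies the very end of the assembled bytecode.
    assert(bytes(out.end() - 32, out.end()) == a.auxiliaryData);
}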
View File

@@ -71,6 +71,9 @@ public:
AssemblyItem appendJumpI(AssemblyItem const& _tag) { auto ret = append(_tag.pushTag()); append(solidity::Instruction::JUMPI); return ret; }
AssemblyItem errorTag() { return AssemblyItem(PushTag, 0); }
+/// Appends @a _data literally to the very end of the bytecode.
+void appendAuxiliaryDataToEnd(bytes const& _data) { m_auxiliaryData += _data; }
template <class T> Assembly& operator<<(T const& _d) { append(_d); return *this; }
AssemblyItems const& items() const { return m_items; }
AssemblyItem const& back() const { return m_items.back(); }
@@ -125,10 +128,12 @@ private:
Json::Value createJsonValue(std::string _name, int _begin, int _end, std::string _value = std::string(), std::string _jumpType = std::string()) const;
protected:
-// 0 is reserved for exception
+/// 0 is reserved for exception
unsigned m_usedTags = 1;
AssemblyItems m_items;
std::map<h256, bytes> m_data;
+/// Data that is appended to the very end of the contract.
+bytes m_auxiliaryData;
std::vector<std::shared_ptr<Assembly>> m_subs;
std::map<h256, std::string> m_strings;
std::map<h256, std::string> m_libraries; ///< Identifiers of libraries to be linked.

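A small standalone demonstration (illustrative only; the real class lives in libevmasm) of the accumulation semantics of appendAuxiliaryDataToEnd as declared above: since the inline definition is m_auxiliaryData += _data, repeated calls concatenate their arguments in call order, and the combined buffer later lands at the very end of the assembled bytecode. The stand-in type below is an assumption for the sake of a runnable example.

#include <cassert>
#include <cstdint>
#include <vector>

using bytes = std::vector<uint8_t>;

// Minimal stand-in that mirrors only the auxiliary-data bookkeeping.
struct AuxiliaryDataHolder
{
    bytes m_auxiliaryData;

    void appendAuxiliaryDataToEnd(bytes const& _data)
    {
        // Equivalent of m_auxiliaryData += _data for dev::bytes.
        m_auxiliaryData.insert(m_auxiliaryData.end(), _data.begin(), _data.end());
    }
};

int main()
{
    AuxiliaryDataHolder holder;
    holder.appendAuxiliaryDataToEnd({0xde, 0xad});
    holder.appendAuxiliaryDataToEnd({0xbe, 0xef});
    assert((holder.m_auxiliaryData == bytes{0xde, 0xad, 0xbe, 0xef}));
}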
View File

@@ -65,6 +65,7 @@ SourceUnitAnnotation& SourceUnit::annotation() const
string Declaration::sourceUnitName() const
{
+solAssert(!!m_scope, "");
ASTNode const* scope = m_scope;
while (dynamic_cast<Declaration const*>(scope) && dynamic_cast<Declaration const*>(scope)->m_scope)
scope = dynamic_cast<Declaration const*>(scope)->m_scope;

View File

@@ -36,6 +36,7 @@ void Compiler::compileContract(
{
ContractCompiler runtimeCompiler(nullptr, m_runtimeContext, m_optimize);
runtimeCompiler.compileContract(_contract, _contracts);
+m_runtimeContext.appendAuxiliaryData(_metadataHash.asBytes());
// This might modify m_runtimeContext because it can access runtime functions at
// creation time.
@@ -43,9 +44,6 @@ void Compiler::compileContract(
m_runtimeSub = creationCompiler.compileConstructor(_contract, _contracts);
m_context.optimise(m_optimize, m_optimizeRuns);
solAssert(m_runtimeSub != size_t(-1), "");
-m_context.injectMetadataHashIntoSub(m_runtimeSub, _metadataHash);
}
void Compiler::compileClone(

View File

@@ -227,13 +227,6 @@ void CompilerContext::injectVersionStampIntoSub(size_t _subIndex)
sub.injectStart(fromBigEndian<u256>(binaryVersion()));
}
-void CompilerContext::injectMetadataHashIntoSub(size_t _subIndex, h256 const& _metadataHash)
-{
-eth::Assembly& sub = m_asm->sub(_subIndex);
-sub.injectStart(Instruction::POP);
-sub.injectStart(u256(_metadataHash));
-}
FunctionDefinition const& CompilerContext::resolveVirtualFunction(
FunctionDefinition const& _function,
vector<ContractDefinition const*>::const_iterator _searchStart

View File

@@ -155,8 +155,8 @@ public:
/// Prepends "PUSH <compiler version number> POP"
void injectVersionStampIntoSub(size_t _subIndex);
/// Prepends "PUSH <metadata hash> POP"
void injectMetadataHashIntoSub(size_t _subIndex, h256 const& _metadataHash);
/// Appends arbitrary data to the end of the bytecode.
void appendAuxiliaryData(bytes const& _data) { m_asm->appendAuxiliaryDataToEnd(_data); }
void optimise(bool _fullOptimsation, unsigned _runs = 200) { m_asm->optimise(_fullOptimsation, true, _runs); }

View File

@@ -375,6 +375,7 @@ Json::Value const& CompilerStack::metadata(Contract const& _contract, Documentat
if (!m_parseSuccessful)
BOOST_THROW_EXCEPTION(CompilerError() << errinfo_comment("Parsing was not successful."));
+solAssert(_contract.contract, "");
std::unique_ptr<Json::Value const>* doc;
// checks wheather we already have the documentation