Merge pull request #8131 from ethereum/python3

Use Python 3 instead of Python 2 (EOL'd)
chriseth 2020-01-15 15:06:20 +01:00 committed by GitHub
commit 8bd1e7045a
9 changed files with 44 additions and 50 deletions

View File

@@ -1,18 +1,18 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 import sys
 import glob
 import subprocess
 import json
-solc = sys.argv[1]
-report = open("report.txt", "wb")
+SOLC_BIN = sys.argv[1]
+REPORT_FILE = open("report.txt", "wb")
 for optimize in [False, True]:
     for f in sorted(glob.glob("*.sol")):
         sources = {}
         sources[f] = {'content': open(f, 'r').read()}
-        input = {
+        input_json = {
             'language': 'Solidity',
             'sources': sources,
             'settings': {
@@ -22,20 +22,20 @@ for optimize in [False, True]:
                 'outputSelection': {'*': {'*': ['evm.bytecode.object', 'metadata']}}
             }
         }
-        args = [solc, '--standard-json']
+        args = [SOLC_BIN, '--standard-json']
         if optimize:
             args += ['--optimize']
         proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        (out, err) = proc.communicate(json.dumps(input))
+        (out, err) = proc.communicate(json.dumps(input_json))
         try:
             result = json.loads(out.strip())
             for filename in sorted(result['contracts'].keys()):
                 for contractName in sorted(result['contracts'][filename].keys()):
                     contractData = result['contracts'][filename][contractName]
                     if 'evm' in contractData and 'bytecode' in contractData['evm']:
-                        report.write(filename + ':' + contractName + ' ' + contractData['evm']['bytecode']['object'] + '\n')
+                        REPORT_FILE.write(filename + ':' + contractName + ' ' + contractData['evm']['bytecode']['object'] + '\n')
                     else:
-                        report.write(filename + ':' + contractName + ' NO BYTECODE\n')
-                    report.write(filename + ':' + contractName + ' ' + contractData['metadata'] + '\n')
+                        REPORT_FILE.write(filename + ':' + contractName + ' NO BYTECODE\n')
+                    REPORT_FILE.write(filename + ':' + contractName + ' ' + contractData['metadata'] + '\n')
         except KeyError:
-            report.write(f + ": ERROR\n")
+            REPORT_FILE.write(f + ": ERROR\n")
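Note: the Python 3 seam this script has to respect is the str/bytes boundary. subprocess pipes and files opened in binary mode ("wb") carry bytes, while json.dumps() and string concatenation produce str. A minimal sketch of the two usual ways to handle that boundary (illustration only, not part of this commit; the 'cat' command and the sample dict are placeholders standing in for `solc --standard-json` and the real input):

    import json
    import subprocess

    input_json = {'language': 'Solidity', 'sources': {}}   # placeholder standard-json input

    # Option 1: text-mode pipes, so str goes in and str comes out.
    proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=True)        # text=True is the Python 3.7+ spelling
    out, _ = proc.communicate(json.dumps(input_json))

    # Option 2: binary pipes with an explicit encode/decode at the boundary.
    proc = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    raw, _ = proc.communicate(json.dumps(input_json).encode('utf-8'))
    out = raw.decode('utf-8')

    # The same rule applies to the report file: 'w' (text mode) accepts str,
    # while 'wb' would require bytes.
    with open('report.txt', 'w') as report:
        report.write('example.sol:C ' + out.strip() + '\n')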

View File

@@ -76,7 +76,7 @@ Build-Depends: debhelper (>= 9.0.0),
                git,
                libgmp-dev,
                dh-python,
-               python
+               python3
 Standards-Version: 3.9.6
 Homepage: https://github.com/Z3Prover/z3
 Vcs-Git: git://github.com/Z3Prover/z3.git

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # This script reads C++ or RST source files and writes all
 # multi-line strings into individual files.
@@ -8,12 +8,9 @@
 import sys
 import re
-import os
-import hashlib
-from os.path import join
-def extract_test_cases(path):
-    lines = open(path, 'rb').read().splitlines()
+def extract_test_cases(_path):
+    lines = open(_path, 'rb').read().splitlines()
     inside = False
     delimiter = ''
@@ -42,8 +39,5 @@ def extract_test_cases(path):
             inside = True
             delimiter = m.group(1)
 if __name__ == '__main__':
-    path = sys.argv[1]
-    extract_test_cases(path)
+    extract_test_cases(sys.argv[1])

View File

@@ -43,7 +43,6 @@ import os
 import subprocess
 import sys
 def readDependencies(fname):
     with open(fname) as f:
         o = subprocess.Popen(['otool', '-L', fname], stdout=subprocess.PIPE)
@@ -55,7 +54,7 @@ def readDependencies(fname):
             command = "install_name_tool -change " + \
                 library + " @executable_path/./" + \
                 os.path.basename(library) + " " + fname
-            print command
+            print(command)
             os.system("chmod +w " + fname)
             os.system(command)
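As an aside (not part of the diff), the only Python-3-specific change here is print becoming a function; the relinking step itself could also be expressed without building a shell string, which avoids quoting issues. A rough sketch, assuming the library and fname values from the surrounding loop:

    import os
    import stat
    import subprocess

    def relink(library, fname):
        # Make the binary writable for the owner (the script's "chmod +w" step).
        os.chmod(fname, os.stat(fname).st_mode | stat.S_IWUSR)
        # Same install_name_tool invocation, passed as an argument list so the
        # shell never has to parse it.
        subprocess.check_call([
            'install_name_tool', '-change',
            library,
            '@executable_path/./' + os.path.basename(library),
            fname,
        ])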

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # This script reads C++ or RST source files and writes all
 # multi-line strings into individual files.
@@ -13,7 +13,7 @@ import hashlib
 from os.path import join, isfile
 def extract_test_cases(path):
-    lines = open(path, 'rb').read().splitlines()
+    lines = open(path, 'r').read().splitlines()
     inside = False
     delimiter = ''
@@ -43,7 +43,7 @@ def extract_docs_cases(path):
     tests = []
     # Collect all snippets of indented blocks
-    for l in open(path, 'rb').read().splitlines():
+    for l in open(path, 'r').read().splitlines():
         if l != '':
             if not inside and l.startswith(' '):
                 # start new test
@@ -71,7 +71,8 @@ def write_cases(f, tests):
         # When code examples are extracted they indented by 8 spaces, which violates the style guide,
         # so before checking remove 4 spaces from each line.
         remainder = re.sub(r'^ {4}', '', test, 0, re.MULTILINE)
-        open('test_%s_%s.sol' % (hashlib.sha256(test).hexdigest(), cleaned_filename), 'wb').write(remainder)
+        sol_filename = 'test_%s_%s.sol' % (hashlib.sha256(test.encode("utf-8")).hexdigest(), cleaned_filename)
+        open(sol_filename, 'w').write(remainder)
 def extract_and_write(f, path):
     if docs:

View File

@@ -49,7 +49,7 @@ cp $REPO_ROOT/build/solc/solc $ZIP_TEMP_DIR
 # being for kernel-level dylibs.
 if [[ "$OSTYPE" == "darwin"* ]]; then
-    python $REPO_ROOT/scripts/fix_homebrew_paths_in_standalone_zip.py $ZIP_TEMP_DIR
+    python3 $REPO_ROOT/scripts/fix_homebrew_paths_in_standalone_zip.py $ZIP_TEMP_DIR
 fi
 # And ZIP it all up, with a filename suffix passed in on the command-line.

View File

@@ -12,7 +12,7 @@ for new_proof in $(git diff origin/develop --name-only test/formal/)
 do
     set +e
     echo "Proving $new_proof..."
-    output=$(python "$new_proof")
+    output=$(python3 "$new_proof")
     result=$?
     set -e

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # This script reads a syntaxTest file and writes all
 # sources into their own files. If one source-name specifies subdirectories
@@ -57,6 +57,6 @@ if __name__ == '__main__':
         srcString = ""
         for src in createdSources:
             srcString += src + ' '
-        print srcString
+        print(srcString)
     else:
         sys.exit(1)

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3
 #
 # This script is used to generate the list of bugs per compiler version
 # from the list of bugs.