pylint: Enable and fix redefined-builtin warnings

Kamil Śliwak 2021-12-21 15:26:54 +01:00
parent 5b10ff1216
commit dece5f4de2
7 changed files with 54 additions and 55 deletions

View File

@@ -63,7 +63,7 @@ master_doc = 'index'
 
 # General information about the project.
 project = 'Solidity'
-copyright = '2016-2021, Ethereum'
+project_copyright = '2016-2021, Ethereum'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the

View File

@@ -33,12 +33,12 @@ def parse_call(call):
     return function.strip(), arguments.strip(), results.strip()
 
 
-def colorize(left, right, id):
+def colorize(left, right, index):
     red = "\x1b[31m"
     yellow = "\x1b[33m"
     reset = "\x1b[0m"
     colors = [red, yellow]
-    color = colors[id % len(colors)]
+    color = colors[index % len(colors)]
     function, _arguments, _results = parse_call(right)
     left = left.replace("compileAndRun", color + "compileAndRun" + reset)
     right = right.replace("constructor", color + "constructor" + reset)
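The rename is more than style: a parameter named `id` hides the builtin `id()` for the whole function body, so any later call to it fails in a non-obvious way. A minimal sketch of the failure mode (function and arguments here are illustrative, not from this repo):

    def tag(value, id):      # pylint flags this as W0622 (redefined-builtin)
        return id(value)     # id is whatever was passed in, not the builtin

    tag("x", 3)              # TypeError: 'int' object is not callable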

View File

@@ -32,11 +32,11 @@ class Trace:
     def get_input(self):
         return self._input
 
-    def set_input(self, input):
+    def set_input(self, bytecode):
         if self.kind == "create":
             # remove cbor encoded metadata from bytecode
-            length = int(input[-4:], 16) * 2
-            self._input = input[:len(input) - length - 4]
+            length = int(bytecode[-4:], 16) * 2
+            self._input = bytecode[:len(bytecode) - length - 4]
 
     def get_output(self):
         return self._output
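The slicing in set_input relies on how Solidity terminates creation bytecode: the final two bytes are a big-endian byte count for the CBOR metadata that precedes them, and the code works on a hex string where one byte is two characters. A worked sketch with made-up values:

    # hypothetical creation bytecode: 2 bytes of code, 3 bytes of CBOR
    # metadata, then 0x0003 encoding the metadata length in bytes
    bytecode = "6001" + "a1b2c3" + "0003"
    length = int(bytecode[-4:], 16) * 2                 # 3 bytes -> 6 hex chars
    stripped = bytecode[:len(bytecode) - length - 4]    # drop metadata + 2 length bytes
    assert stripped == "6001"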
@@ -110,21 +110,21 @@ class TraceAnalyser:
 
     @staticmethod
     def parse_parameters(line, trace):
-        input = re.search(r'\s*in:\s*([a-fA-F0-9]*)', line, re.M | re.I)
-        if input:
-            trace.input = input.group(1)
-        output = re.search(r'\s*out:\s*([a-fA-F0-9]*)', line, re.M | re.I)
-        if output:
-            trace.output = output.group(1)
-        result = re.search(r'\s*result:\s*([a-fA-F0-9]*)', line, re.M | re.I)
-        if result:
-            trace.result = result.group(1)
-        gas_used = re.search(r'\s*gas\sused:\s*([a-fA-F0-9]*)', line, re.M | re.I)
-        if gas_used:
-            trace.gas = gas_used.group(1)
-        value = re.search(r'\s*value:\s*([a-fA-F0-9]*)', line, re.M | re.I)
-        if value:
-            trace.value = value.group(1)
+        input_match = re.search(r'\s*in:\s*([a-fA-F0-9]*)', line, re.M | re.I)
+        if input_match:
+            trace.input = input_match.group(1)
+        output_match = re.search(r'\s*out:\s*([a-fA-F0-9]*)', line, re.M | re.I)
+        if output_match:
+            trace.output = output_match.group(1)
+        result_match = re.search(r'\s*result:\s*([a-fA-F0-9]*)', line, re.M | re.I)
+        if result_match:
+            trace.result = result_match.group(1)
+        gas_used_match = re.search(r'\s*gas\sused:\s*([a-fA-F0-9]*)', line, re.M | re.I)
+        if gas_used_match:
+            trace.gas = gas_used_match.group(1)
+        value_match = re.search(r'\s*value:\s*([a-fA-F0-9]*)', line, re.M | re.I)
+        if value_match:
+            trace.value = value_match.group(1)
 
     def diff(self, analyser):
         if not self.ready:
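Only `input` actually collides with a builtin here; the other variables pick up the `_match` suffix for consistency. On Python 3.8+ the repeated search-then-test pattern could also be tightened with assignment expressions; a sketch of that alternative, not what this commit does:

    import re

    def parse_parameters(line, trace):
        # assignment expressions keep each match object short-lived
        if match := re.search(r'\s*in:\s*([a-fA-F0-9]*)', line, re.M | re.I):
            trace.input = match.group(1)
        if match := re.search(r'\s*out:\s*([a-fA-F0-9]*)', line, re.M | re.I):
            trace.output = match.group(1)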

View File

@@ -44,11 +44,11 @@ def find_ids_in_source_file(file_name, id_to_file_names):
         if in_comment(source, m.start()):
             continue
         underscore_pos = m.group(0).index("_")
-        id = m.group(0)[0:underscore_pos]
-        if id in id_to_file_names:
-            id_to_file_names[id].append(file_name)
+        error_id = m.group(0)[0:underscore_pos]
+        if error_id in id_to_file_names:
+            id_to_file_names[error_id].append(file_name)
         else:
-            id_to_file_names[id] = [file_name]
+            id_to_file_names[error_id] = [file_name]
 
 
 def find_ids_in_source_files(file_names):
@@ -76,16 +76,16 @@ def fix_ids_in_source_file(file_name, id_to_count, available_ids):
         destination.extend(source[k:m.start()])
 
         underscore_pos = m.group(0).index("_")
-        id = m.group(0)[0:underscore_pos]
+        error_id = m.group(0)[0:underscore_pos]
 
         # incorrect id or id has a duplicate somewhere
-        if not in_comment(source, m.start()) and (len(id) != 4 or id[0] == "0" or id_to_count[id] > 1):
-            assert id in id_to_count
+        if not in_comment(source, m.start()) and (len(error_id) != 4 or error_id[0] == "0" or id_to_count[error_id] > 1):
+            assert error_id in id_to_count
             new_id = get_next_id(available_ids)
             assert new_id not in id_to_count
-            id_to_count[id] -= 1
+            id_to_count[error_id] -= 1
         else:
-            new_id = id
+            new_id = error_id
 
         destination.extend(new_id + "_error")
         k = m.end()
@@ -104,7 +104,7 @@ def fix_ids_in_source_files(file_names, id_to_count):
     id_to_count contains number of appearances of every id in sources
     """
-    available_ids = {str(id) for id in range(1000, 10000)} - id_to_count.keys()
+    available_ids = {str(error_id) for error_id in range(1000, 10000)} - id_to_count.keys()
 
     for file_name in file_names:
         fix_ids_in_source_file(file_name, id_to_count, available_ids)
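The set difference here works because `dict.keys()` returns a view that behaves like a set. A small runnable check of the idiom (counts are hypothetical):

    id_to_count = {"1234": 2, "5678": 1}
    available_ids = {str(n) for n in range(1000, 10000)} - id_to_count.keys()
    assert "1234" not in available_ids and "2345" in available_ids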
@@ -113,8 +113,8 @@ def find_files(top_dir, sub_dirs, extensions):
     """Builds a list of files with given extensions in specified subdirectories"""
 
     source_file_names = []
-    for dir in sub_dirs:
-        for root, _, file_names in os.walk(os.path.join(top_dir, dir), onerror=lambda e: exit(f"Walk error: {e}")):
+    for directory in sub_dirs:
+        for root, _, file_names in os.walk(os.path.join(top_dir, directory), onerror=lambda e: sys.exit(f"Walk error: {e}")):
             for file_name in file_names:
                 _, ext = path.splitext(file_name)
                 if ext in extensions:
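Besides renaming `dir`, this hunk swaps the bare `exit()` for `sys.exit()`. The difference is worth spelling out: `exit()` is injected by the site module for interactive use and is not guaranteed to exist (e.g. under `python -S`), while `sys.exit()` is always importable and simply raises SystemExit. A minimal sketch (the helper name is made up):

    import sys

    def abort(message):
        # sys.exit raises SystemExit and, unlike the site-provided exit(),
        # is available even when the site module is skipped
        sys.exit(f"Walk error: {message}")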
@@ -145,27 +145,27 @@ def find_ids_in_cmdline_test_err(file_name):
 
 def print_ids(ids):
-    for k, id in enumerate(sorted(ids)):
+    for k, error_id in enumerate(sorted(ids)):
         if k % 10 > 0:
             print(" ", end="")
         elif k > 0:
             print()
 
-        print(id, end="")
+        print(error_id, end="")
 
 
 def print_ids_per_file(ids, id_to_file_names, top_dir):
     file_name_to_ids = {}
-    for id in ids:
-        for file_name in id_to_file_names[id]:
+    for error_id in ids:
+        for file_name in id_to_file_names[error_id]:
             relpath = path.relpath(file_name, top_dir)
             if relpath not in file_name_to_ids:
                 file_name_to_ids[relpath] = []
-            file_name_to_ids[relpath].append(id)
+            file_name_to_ids[relpath].append(error_id)
 
     for file_name in sorted(file_name_to_ids):
         print(file_name)
-        for id in sorted(file_name_to_ids[file_name]):
-            print(f" {id}", end="")
+        for error_id in sorted(file_name_to_ids[file_name]):
+            print(f" {error_id}", end="")
         print()
@@ -289,15 +289,15 @@ def main(argv):
     source_id_to_file_names = find_ids_in_source_files(source_file_names)
 
     ok = True
-    for id in sorted(source_id_to_file_names):
-        if len(id) != 4:
-            print(f"ID {id} length != 4")
+    for error_id in sorted(source_id_to_file_names):
+        if len(error_id) != 4:
+            print(f"ID {error_id} length != 4")
             ok = False
-        if id[0] == "0":
-            print(f"ID {id} starts with zero")
+        if error_id[0] == "0":
+            print(f"ID {error_id} starts with zero")
             ok = False
-        if len(source_id_to_file_names[id]) > 1:
-            print(f"ID {id} appears {len(source_id_to_file_names[id])} times")
+        if len(source_id_to_file_names[error_id]) > 1:
+            print(f"ID {error_id} appears {len(source_id_to_file_names[error_id])} times")
             ok = False
 
     if examine_coverage:
@@ -315,7 +315,7 @@ def main(argv):
         if not ok:
             print("Incorrect IDs have to be fixed before applying --next")
             sys.exit(1)
-        available_ids = {str(id) for id in range(1000, 10000)} - source_id_to_file_names.keys()
+        available_ids = {str(error_id) for error_id in range(1000, 10000)} - source_id_to_file_names.keys()
         next_id = get_next_id(available_ids)
         print(f"Next ID: {next_id}")
         sys.exit(0)
@@ -341,7 +341,7 @@ def main(argv):
             sys.exit(1)
 
         # number of appearances for every id
-        source_id_to_count = { id: len(file_names) for id, file_names in source_id_to_file_names.items() }
+        source_id_to_count = { error_id: len(file_names) for error_id, file_names in source_id_to_file_names.items() }
         fix_ids_in_source_files(source_file_names, source_id_to_count)
         print("Fixing completed")

View File

@@ -106,8 +106,8 @@ def write_cases(f, solidityTests, yulTests):
 
         # When code examples are extracted they are indented by 8 spaces, which violates the style guide,
         # so before checking remove 4 spaces from each line.
         remainder = dedent(test)
-        hash = hashlib.sha256(test.encode("utf-8")).hexdigest()
-        sol_filename = f'test_{hash}_{cleaned_filename}.{language}'
+        source_code_hash = hashlib.sha256(test.encode("utf-8")).hexdigest()
+        sol_filename = f'test_{source_code_hash}_{cleaned_filename}.{language}'
         with open(sol_filename, mode='w', encoding='utf8', newline='') as fi:
             fi.write(remainder)
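Renaming `hash` also keeps the builtin `hash()` usable later in the scope. Note that `hashlib.sha256()` accepts only bytes, hence the `.encode("utf-8")` on the str input here. A short runnable illustration of the naming scheme (the source text and filename suffix are made up):

    import hashlib

    test = "contract C {}"
    source_code_hash = hashlib.sha256(test.encode("utf-8")).hexdigest()
    print(f"test_{source_code_hash}_example.sol")   # deterministic per source text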

View File

@@ -14,7 +14,7 @@
 # ATTENTION: This list should be extended with care, consider using NOLINT comments inside your
 # python files instead, as the goal is actually to reduce the list of globally disabled checks.
 #
-# TODO: What could be eliminated in future PRs: bad-continuation, invalid-name, redefined-builtin,
+# TODO: What could be eliminated in future PRs: bad-continuation, invalid-name,
 # undefined-variable, unused-*, useless-object-inheritance.
 disable=
     bad-continuation,
@@ -25,7 +25,6 @@ disable=
     missing-docstring,
     no-else-return,
     pointless-string-statement,
-    redefined-builtin,
     redefined-outer-name,
     too-few-public-methods,
     too-many-public-methods,
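With redefined-builtin now enforced globally, any stubborn one-off case could still be silenced locally, in line with the file's own advice to prefer targeted suppressions over global disables. Pylint's inline pragma for that looks like this (the function is a made-up example):

    def describe(object):  # pylint: disable=redefined-builtin
        return repr(object)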

View File

@@ -47,8 +47,8 @@ def write_cases(f, tests):
     cleaned_filename = f.replace(".","_").replace("-","_").replace(" ","_").lower()
 
     for test in tests:
        remainder = re.sub(r'^ {4}', '', test, 0, re.MULTILINE)
-        hash = hashlib.sha256(test).hexdigest()
-        with open(f'test_{hash}_{cleaned_filename}.sol', 'w', encoding='utf8') as _f:
+        source_code_hash = hashlib.sha256(test).hexdigest()
+        with open(f'test_{source_code_hash}_{cleaned_filename}.sol', 'w', encoding='utf8') as _f:
             _f.write(remainder)