Merge pull request #12445 from ethereum/pylint-enable-more-warnings

[pylint] Enable more warnings
chriseth 2021-12-21 18:20:52 +01:00 committed by GitHub
commit 0769e4df5d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 81 additions and 84 deletions

View File

@@ -63,7 +63,7 @@ master_doc = 'index'
# General information about the project.
project = 'Solidity'
copyright = '2016-2021, Ethereum'
project_copyright = '2016-2021, Ethereum'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
@@ -73,7 +73,7 @@ copyright = '2016-2021, Ethereum'
with open('../CMakeLists.txt', 'r', encoding='utf8') as f:
version = re.search('PROJECT_VERSION "([^"]+)"', f.read()).group(1)
# The full version, including alpha/beta/rc tags.
if os.path.isfile('../prerelease.txt') != True or os.path.getsize('../prerelease.txt') == 0:
if not os.path.isfile('../prerelease.txt') or os.path.getsize('../prerelease.txt') == 0:
release = version
else:
# This is a prerelease version
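
Both conf.py hunks serve the newly enabled checks: the module-level name copyright shadows a built-in (Sphinx accepts project_copyright as an alias), and comparing the isfile() result against True trips singleton-comparison. A minimal standalone sketch of the second pattern, reusing the path from the diff:

    import os

    # Flagged by singleton-comparison:
    #     if os.path.isfile('../prerelease.txt') != True: ...
    # Preferred: use the boolean result directly.
    if not os.path.isfile('../prerelease.txt'):
        print("no prerelease file")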

View File

@@ -33,12 +33,12 @@ def parse_call(call):
return function.strip(), arguments.strip(), results.strip()
def colorize(left, right, id):
def colorize(left, right, index):
red = "\x1b[31m"
yellow = "\x1b[33m"
reset = "\x1b[0m"
colors = [red, yellow]
color = colors[id % len(colors)]
color = colors[index % len(colors)]
function, _arguments, _results = parse_call(right)
left = left.replace("compileAndRun", color + "compileAndRun" + reset)
right = right.replace("constructor", color + "constructor" + reset)
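
Renaming the id parameter to index addresses redefined-builtin: a parameter named id shadows the built-in id() function throughout the function body. A self-contained illustration (the function name is made up):

    def pick_color(colors, index):
        # A parameter called `id` here would shadow the built-in id(),
        # which pylint reports as redefined-builtin.
        return colors[index % len(colors)]

    print(pick_color(["red", "yellow"], 3))  # -> yellow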

View File

@@ -32,11 +32,11 @@ class Trace:
def get_input(self):
return self._input
def set_input(self, input):
def set_input(self, bytecode):
if self.kind == "create":
# remove cbor encoded metadata from bytecode
length = int(input[-4:], 16) * 2
self._input = input[:len(input) - length - 4]
length = int(bytecode[-4:], 16) * 2
self._input = bytecode[:len(bytecode) - length - 4]
def get_output(self):
return self._output
@@ -110,21 +110,21 @@ class TraceAnalyser:
@staticmethod
def parse_parameters(line, trace):
input = re.search(r'\s*in:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if input:
trace.input = input.group(1)
output = re.search(r'\s*out:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if output:
trace.output = output.group(1)
result = re.search(r'\s*result:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if result:
trace.result = result.group(1)
gas_used = re.search(r'\s*gas\sused:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if gas_used:
trace.gas = gas_used.group(1)
value = re.search(r'\s*value:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if value:
trace.value = value.group(1)
input_match = re.search(r'\s*in:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if input_match:
trace.input = input_match.group(1)
output_match = re.search(r'\s*out:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if output_match:
trace.output = output_match.group(1)
result_match = re.search(r'\s*result:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if result_match:
trace.result = result_match.group(1)
gas_used_match = re.search(r'\s*gas\sused:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if gas_used_match:
trace.gas = gas_used_match.group(1)
value_match = re.search(r'\s*value:\s*([a-fA-F0-9]*)', line, re.M | re.I)
if value_match:
trace.value = value_match.group(1)
def diff(self, analyser):
if not self.ready:
@@ -154,7 +154,8 @@ class TraceAnalyser:
print(len(intersection), "test-cases - ", len(mismatches), " mismatche(s)")
def check_traces(self, test_name, left, right, mismatches):
@classmethod
def check_traces(cls, test_name, left, right, mismatches):
for trace_id, trace in enumerate(left.traces):
left_trace = trace
right_trace = right.traces[trace_id]
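
check_traces never touches self, so turning it into a classmethod clears pylint's no-self-use warning without changing any call sites. A sketch of the pattern (class and method names are illustrative):

    class TraceChecker:
        @classmethod
        def check(cls, left, right):
            # No instance state is read or written, so a classmethod
            # (or a @staticmethod) satisfies no-self-use.
            return left == right

    print(TraceChecker.check("a", "a"))  # True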

View File

@@ -18,7 +18,7 @@ def read_file(file_name):
with open(file_name, "r", encoding="latin-1" if is_latin else ENCODING) as f:
content = f.read()
finally:
if content == None:
if content is None:
print(f"Error reading: {file_name}")
return content
@@ -44,11 +44,11 @@ def find_ids_in_source_file(file_name, id_to_file_names):
if in_comment(source, m.start()):
continue
underscore_pos = m.group(0).index("_")
id = m.group(0)[0:underscore_pos]
if id in id_to_file_names:
id_to_file_names[id].append(file_name)
error_id = m.group(0)[0:underscore_pos]
if error_id in id_to_file_names:
id_to_file_names[error_id].append(file_name)
else:
id_to_file_names[id] = [file_name]
id_to_file_names[error_id] = [file_name]
def find_ids_in_source_files(file_names):
@@ -76,16 +76,16 @@ def fix_ids_in_source_file(file_name, id_to_count, available_ids):
destination.extend(source[k:m.start()])
underscore_pos = m.group(0).index("_")
id = m.group(0)[0:underscore_pos]
error_id = m.group(0)[0:underscore_pos]
# incorrect id or id has a duplicate somewhere
if not in_comment(source, m.start()) and (len(id) != 4 or id[0] == "0" or id_to_count[id] > 1):
assert id in id_to_count
if not in_comment(source, m.start()) and (len(error_id) != 4 or error_id[0] == "0" or id_to_count[error_id] > 1):
assert error_id in id_to_count
new_id = get_next_id(available_ids)
assert new_id not in id_to_count
id_to_count[id] -= 1
id_to_count[error_id] -= 1
else:
new_id = id
new_id = error_id
destination.extend(new_id + "_error")
k = m.end()
@@ -104,7 +104,7 @@ def fix_ids_in_source_files(file_names, id_to_count):
id_to_count contains number of appearances of every id in sources
"""
available_ids = {str(id) for id in range(1000, 10000)} - id_to_count.keys()
available_ids = {str(error_id) for error_id in range(1000, 10000)} - id_to_count.keys()
for file_name in file_names:
fix_ids_in_source_file(file_name, id_to_count, available_ids)
@@ -113,8 +113,8 @@ def find_files(top_dir, sub_dirs, extensions):
"""Builds a list of files with given extensions in specified subdirectories"""
source_file_names = []
for dir in sub_dirs:
for root, _, file_names in os.walk(os.path.join(top_dir, dir), onerror=lambda e: exit(f"Walk error: {e}")):
for directory in sub_dirs:
for root, _, file_names in os.walk(os.path.join(top_dir, directory), onerror=lambda e: sys.exit(f"Walk error: {e}")):
for file_name in file_names:
_, ext = path.splitext(file_name)
if ext in extensions:
@@ -145,27 +145,27 @@ def find_ids_in_cmdline_test_err(file_name):
def print_ids(ids):
for k, id in enumerate(sorted(ids)):
for k, error_id in enumerate(sorted(ids)):
if k % 10 > 0:
print(" ", end="")
elif k > 0:
print()
print(id, end="")
print(error_id, end="")
def print_ids_per_file(ids, id_to_file_names, top_dir):
file_name_to_ids = {}
for id in ids:
for file_name in id_to_file_names[id]:
for error_id in ids:
for file_name in id_to_file_names[error_id]:
relpath = path.relpath(file_name, top_dir)
if relpath not in file_name_to_ids:
file_name_to_ids[relpath] = []
file_name_to_ids[relpath].append(id)
file_name_to_ids[relpath].append(error_id)
for file_name in sorted(file_name_to_ids):
print(file_name)
for id in sorted(file_name_to_ids[file_name]):
print(f" {id}", end="")
for error_id in sorted(file_name_to_ids[file_name]):
print(f" {error_id}", end="")
print()
@@ -277,7 +277,7 @@ def main(argv):
if [check, fix, examine_coverage, next_id].count(True) != 1:
print("usage: python error_codes.py --check | --fix [--no-confirm] | --examine-coverage | --next")
exit(1)
sys.exit(1)
cwd = os.getcwd()
@@ -289,23 +289,23 @@ def main(argv):
source_id_to_file_names = find_ids_in_source_files(source_file_names)
ok = True
for id in sorted(source_id_to_file_names):
if len(id) != 4:
print(f"ID {id} length != 4")
for error_id in sorted(source_id_to_file_names):
if len(error_id) != 4:
print(f"ID {error_id} length != 4")
ok = False
if id[0] == "0":
print(f"ID {id} starts with zero")
if error_id[0] == "0":
print(f"ID {error_id} starts with zero")
ok = False
if len(source_id_to_file_names[id]) > 1:
print(f"ID {id} appears {len(source_id_to_file_names[id])} times")
if len(source_id_to_file_names[error_id]) > 1:
print(f"ID {error_id} appears {len(source_id_to_file_names[error_id])} times")
ok = False
if examine_coverage:
if not ok:
print("Incorrect IDs have to be fixed before applying --examine-coverage")
exit(1)
sys.exit(1)
res = 0 if examine_id_coverage(cwd, source_id_to_file_names) else 1
exit(res)
sys.exit(res)
ok &= examine_id_coverage(cwd, source_id_to_file_names, new_ids_only=True)
@@ -314,18 +314,18 @@ def main(argv):
if next_id:
if not ok:
print("Incorrect IDs have to be fixed before applying --next")
exit(1)
available_ids = {str(id) for id in range(1000, 10000)} - source_id_to_file_names.keys()
sys.exit(1)
available_ids = {str(error_id) for error_id in range(1000, 10000)} - source_id_to_file_names.keys()
next_id = get_next_id(available_ids)
print(f"Next ID: {next_id}")
exit(0)
sys.exit(0)
if ok:
print("No incorrect IDs found")
exit(0)
sys.exit(0)
if check:
exit(1)
sys.exit(1)
assert fix, "Unexpected state, should not come here without --fix"
@@ -338,14 +338,14 @@ def main(argv):
while len(answer) == 0 or answer not in "YNyn":
answer = input("[Y/N]? ")
if answer not in "yY":
exit(1)
sys.exit(1)
# number of appearances for every id
source_id_to_count = { id: len(file_names) for id, file_names in source_id_to_file_names.items() }
source_id_to_count = { error_id: len(file_names) for error_id, file_names in source_id_to_file_names.items() }
fix_ids_in_source_files(source_file_names, source_id_to_count)
print("Fixing completed")
exit(2)
sys.exit(2)
if __name__ == "__main__":
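
The repeated exit(...) -> sys.exit(...) substitutions fix consider-using-sys-exit: the bare exit() built-in is only installed by the site module (it can be missing, e.g. under python -S), while sys.exit() is the documented API and simply raises SystemExit. Minimal sketch:

    import sys

    def main():
        print("usage: python error_codes.py --check | --fix")
        # consider-using-sys-exit flags a bare exit(1) here.
        sys.exit(1)

    if __name__ == "__main__":
        main()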

View File

@@ -106,8 +106,8 @@ def write_cases(f, solidityTests, yulTests):
# When code examples are extracted they are indented by 8 spaces, which violates the style guide,
# so before checking remove 4 spaces from each line.
remainder = dedent(test)
hash = hashlib.sha256(test.encode("utf-8")).hexdigest()
sol_filename = f'test_{hash}_{cleaned_filename}.{language}'
source_code_hash = hashlib.sha256(test.encode("utf-8")).hexdigest()
sol_filename = f'test_{source_code_hash}_{cleaned_filename}.{language}'
with open(sol_filename, mode='w', encoding='utf8', newline='') as fi:
fi.write(remainder)

View File

@@ -6,9 +6,9 @@ Runs pylint on all Python files in project directories known to contain Python s
from argparse import ArgumentParser
from os import path, walk
from sys import exit
from textwrap import dedent
import subprocess
import sys
PROJECT_ROOT = path.dirname(path.dirname(path.realpath(__file__)))
PYLINT_RCFILE = f"{PROJECT_ROOT}/scripts/pylintrc"
@@ -89,7 +89,7 @@ def main():
success = pylint_all_filenames(options.dev_mode, rootdirs)
if not success:
exit(1)
sys.exit(1)
else:
print("No problems found.")
@@ -98,4 +98,4 @@ if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
exit("Interrupted by user. Exiting.")
sys.exit("Interrupted by user. Exiting.")

View File

@@ -14,25 +14,18 @@
# ATTENTION: This list should be extended with care, consider using NOLINT comments inside your
# python files instead, as the goal is actually to reduce the list of globally disabled checks.
#
# TODO: What could be eliminated in future PRs: bad-continuation, invalid-name, redefined-builtin,
# undefined-variable, unused-*, useless-object-inheritance.
# TODO: What could be eliminated in future PRs: invalid-name, pointless-string-statement, redefined-outer-name.
disable=
bad-continuation,
bad-indentation,
bad-whitespace,
consider-using-sys-exit,
duplicate-code,
invalid-name,
missing-docstring,
no-else-return,
no-self-use,
pointless-string-statement,
redefined-builtin,
redefined-outer-name,
singleton-comparison,
too-few-public-methods,
too-many-public-methods,
ungrouped-imports
too-many-public-methods
[BASIC]
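
The project points pylint at this file through PYLINT_RCFILE (see the pylint_all.py excerpt above). For checks still on the TODO list, a narrowly scoped inline pragma (pylint's syntax is "# pylint: disable=<check>") keeps the global disable list from growing again. A made-up example:

    registry = {"1234": "stack too deep"}  # hypothetical lookup table

    def lookup(id):  # pylint: disable=redefined-builtin
        # The pragma suppresses the check for this definition only,
        # instead of re-adding redefined-builtin to pylintrc.
        return registry[id]

    print(lookup("1234"))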

View File

@@ -41,7 +41,8 @@ class regressor:
"build/test/tools/ossfuzz")
self._logpath = os.path.join(self._repo_root, "test_results")
def parseCmdLine(self, description, args):
@classmethod
def parseCmdLine(cls, description, args):
argParser = ArgumentParser(description)
argParser.add_argument('-o', '--out-dir', required=True, type=str,
help="""Directory where test results will be written""")

View File

@@ -40,7 +40,7 @@ def writeSourceToFile(lines):
filePath, srcName = extractSourceName(lines[0])
# print("sourceName is ", srcName)
# print("filePath is", filePath)
if filePath != False:
if filePath:
os.system("mkdir -p " + filePath)
with open(srcName, mode='a+', encoding='utf8', newline='') as f:
createdSources.append(srcName)
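
One behavioural nuance in this hunk: "if filePath:" is not strictly equivalent to "if filePath != False:", because other falsy values such as an empty string now also skip the branch; that is presumably acceptable here, since an empty path means there is no directory to create. A quick comparison (the last path is made up):

    for file_path in (False, "", "contracts/generated"):
        old_test = file_path != False  # the pre-change comparison
        new_test = bool(file_path)     # what `if file_path:` evaluates
        print(repr(file_path), old_test, new_test)
    # False                 -> old: False  new: False
    # ''                    -> old: True   new: False  (the difference)
    # 'contracts/generated' -> old: True   new: True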

View File

@@ -47,8 +47,8 @@ def write_cases(f, tests):
cleaned_filename = f.replace(".","_").replace("-","_").replace(" ","_").lower()
for test in tests:
remainder = re.sub(r'^ {4}', '', test, 0, re.MULTILINE)
hash = hashlib.sha256(test).hexdigest()
with open(f'test_{hash}_{cleaned_filename}.sol', 'w', encoding='utf8') as _f:
source_code_hash = hashlib.sha256(test).hexdigest()
with open(f'test_{source_code_hash}_{cleaned_filename}.sol', 'w', encoding='utf8') as _f:
_f.write(remainder)

View File

@@ -39,6 +39,7 @@ class Rule:
self.error('Rule is incorrect.\nModel: ' + str(m))
self.solver.pop()
def error(self, msg):
@classmethod
def error(cls, msg):
print(msg)
sys.exit(1)

View File

@@ -5,6 +5,7 @@ import fnmatch
import json
import os
import subprocess
import sys
import traceback
from typing import Any, List, Optional, Tuple, Union
@@ -49,7 +50,7 @@ class JsonRpcProcess:
# Note, we should make use of timeout to avoid infinite blocking if nothing is received.
CONTENT_LENGTH_HEADER = "Content-Length: "
CONTENT_TYPE_HEADER = "Content-Type: "
if self.process.stdout == None:
if self.process.stdout is None:
return None
message_size = None
while True:
@@ -83,7 +84,7 @@ class JsonRpcProcess:
return json_object
def send_message(self, method_name: str, params: Optional[dict]) -> None:
if self.process.stdin == None:
if self.process.stdin is None:
return
message = {
'jsonrpc': '2.0',
@@ -245,7 +246,7 @@ class SolidityLSPTestSuite: # {{{
}
}
}
if expose_project_root == False:
if not expose_project_root:
params['rootUri'] = None
lsp.call_method('initialize', params)
lsp.send_notification('initialized')
@@ -871,4 +872,4 @@ class SolidityLSPTestSuite: # {{{
if __name__ == "__main__":
suite = SolidityLSPTestSuite()
exit_code = suite.main()
exit(exit_code)
sys.exit(exit_code)
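
The == None -> is None rewrites here are singleton-comparison fixes as well: `is` compares identity, so unlike ==, it cannot be misled by a type with a permissive __eq__. A contrived demonstration:

    class AlwaysEqual:
        """Hypothetical type whose __eq__ claims equality with anything."""
        def __eq__(self, other):
            return True

    obj = AlwaysEqual()
    print(obj == None)  # True  (misleading; == None is also what pylint flags)
    print(obj is None)  # False (identity cannot be overridden)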