mirror of
https://github.com/ethereum/solidity
synced 2023-10-03 13:03:40 +00:00
lsp.py: Fix various problems with subdirectory edge cases
* properly fix path parsing with nested subdirectories * fix regex to be able to handle `file://..` * fix not checking all expected diagnostics * some extra comments and renames
This commit is contained in:
parent
96398ee3ee
commit
6c9754a784
@ -222,7 +222,7 @@ defaults:
|
|||||||
command: pip install --user deepdiff colorama
|
command: pip install --user deepdiff colorama
|
||||||
- run:
|
- run:
|
||||||
name: Executing solc LSP test suite
|
name: Executing solc LSP test suite
|
||||||
command: ./test/lsp.py ./build/solc/solc
|
command: ./test/lsp.py ./build/solc/solc --non-interactive
|
||||||
- gitter_notify_failure_unless_pr
|
- gitter_notify_failure_unless_pr
|
||||||
|
|
||||||
- steps_build: &steps_build
|
- steps_build: &steps_build
|
||||||
@ -1352,7 +1352,7 @@ jobs:
|
|||||||
command: Get-Content ./test/lsp.py
|
command: Get-Content ./test/lsp.py
|
||||||
- run:
|
- run:
|
||||||
name: Executing solc LSP test suite
|
name: Executing solc LSP test suite
|
||||||
command: python ./test/lsp.py .\build\solc\Release\solc.exe
|
command: python ./test/lsp.py .\build\solc\Release\solc.exe --non-interactive
|
||||||
- store_test_results: *store_test_results
|
- store_test_results: *store_test_results
|
||||||
- store_artifacts: *artifacts_test_results
|
- store_artifacts: *artifacts_test_results
|
||||||
- gitter_notify_failure_unless_pr
|
- gitter_notify_failure_unless_pr
|
||||||
|
93
test/lsp.py
93
test/lsp.py
@ -69,7 +69,7 @@ TEST_REGEXES = TestRegexesTuple(
|
|||||||
re.compile(R'^// -> (?P<method>[\w\/]+) {'),
|
re.compile(R'^// -> (?P<method>[\w\/]+) {'),
|
||||||
re.compile(R'(?P<tag>"@\w+")'),
|
re.compile(R'(?P<tag>"@\w+")'),
|
||||||
re.compile(R'(?P<tag>@\w+)'),
|
re.compile(R'(?P<tag>@\w+)'),
|
||||||
re.compile(R'// (?P<testname>\w+):[ ]?(?P<diagnostics>[\w @]*)'),
|
re.compile(R'// (?P<testname>\S+):([ ](?P<diagnostics>[\w @]*))?'),
|
||||||
re.compile(R'(?P<tag>@\w+) (?P<code>\d\d\d\d)')
|
re.compile(R'(?P<tag>@\w+) (?P<code>\d\d\d\d)')
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -86,7 +86,7 @@ def split_path(path):
|
|||||||
"""
|
"""
|
||||||
Return the test name and the subdir path of the given path.
|
Return the test name and the subdir path of the given path.
|
||||||
"""
|
"""
|
||||||
sub_dir_separator = path.find("/")
|
sub_dir_separator = path.rfind("/")
|
||||||
|
|
||||||
if sub_dir_separator == -1:
|
if sub_dir_separator == -1:
|
||||||
return (path, None)
|
return (path, None)
|
||||||
@ -105,7 +105,8 @@ def count_index(lines, start=0):
|
|||||||
|
|
||||||
def tags_only(lines, start=0):
|
def tags_only(lines, start=0):
|
||||||
"""
|
"""
|
||||||
Filter the lines for tag comments and report line number that tags refer to.
|
Filter the lines for tag comments and report the line number that the tags
|
||||||
|
_refer_ to (which is not the line they are on!).
|
||||||
"""
|
"""
|
||||||
n = start
|
n = start
|
||||||
numCommentLines = 0
|
numCommentLines = 0
|
||||||
@ -272,27 +273,34 @@ def create_cli_parser() -> argparse.ArgumentParser:
|
|||||||
parser = argparse.ArgumentParser(description="Solidity LSP Test suite")
|
parser = argparse.ArgumentParser(description="Solidity LSP Test suite")
|
||||||
parser.set_defaults(fail_fast=False)
|
parser.set_defaults(fail_fast=False)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-f, --fail-fast",
|
"-f", "--fail-fast",
|
||||||
dest="fail_fast",
|
dest="fail_fast",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="Terminates the running tests on first failure."
|
help="Terminates the running tests on first failure."
|
||||||
)
|
)
|
||||||
|
parser.set_defaults(non_interactive=False)
|
||||||
|
parser.add_argument(
|
||||||
|
"-n", "--non-interactive",
|
||||||
|
dest="non_interactive",
|
||||||
|
action="store_true",
|
||||||
|
help="Prevent interactive queries and just fail instead."
|
||||||
|
)
|
||||||
parser.set_defaults(trace_io=False)
|
parser.set_defaults(trace_io=False)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-T, --trace-io",
|
"-T", "--trace-io",
|
||||||
dest="trace_io",
|
dest="trace_io",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="Be more verbose by also printing assertions."
|
help="Be more verbose by also printing assertions."
|
||||||
)
|
)
|
||||||
parser.set_defaults(print_assertions=False)
|
parser.set_defaults(print_assertions=False)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-v, --print-assertions",
|
"-v", "--print-assertions",
|
||||||
dest="print_assertions",
|
dest="print_assertions",
|
||||||
action="store_true",
|
action="store_true",
|
||||||
help="Be more verbose by also printing assertions."
|
help="Be more verbose by also printing assertions."
|
||||||
)
|
)
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
"-t, --test-pattern",
|
"-t", "--test-pattern",
|
||||||
dest="test_pattern",
|
dest="test_pattern",
|
||||||
type=str,
|
type=str,
|
||||||
default="*",
|
default="*",
|
||||||
@ -405,11 +413,13 @@ class TestParser:
|
|||||||
|
|
||||||
testDiagnostics = []
|
testDiagnostics = []
|
||||||
|
|
||||||
for diagnosticMatch in TEST_REGEXES.diagnostic.finditer(fileDiagMatch.group("diagnostics")):
|
diagnostics_string = fileDiagMatch.group("diagnostics")
|
||||||
testDiagnostics.append(self.Diagnostic(
|
if diagnostics_string is not None:
|
||||||
diagnosticMatch.group("tag"),
|
for diagnosticMatch in TEST_REGEXES.diagnostic.finditer(diagnostics_string):
|
||||||
int(diagnosticMatch.group("code"))
|
testDiagnostics.append(self.Diagnostic(
|
||||||
))
|
diagnosticMatch.group("tag"),
|
||||||
|
int(diagnosticMatch.group("code"))
|
||||||
|
))
|
||||||
|
|
||||||
diagnostics["tests"][fileDiagMatch.group("testname")] = testDiagnostics
|
diagnostics["tests"][fileDiagMatch.group("testname")] = testDiagnostics
|
||||||
|
|
||||||
@ -537,11 +547,11 @@ class FileTestRunner:
|
|||||||
self.expected_diagnostics = next(self.parsed_testcases)
|
self.expected_diagnostics = next(self.parsed_testcases)
|
||||||
assert isinstance(self.expected_diagnostics, TestParser.Diagnostics) is True
|
assert isinstance(self.expected_diagnostics, TestParser.Diagnostics) is True
|
||||||
|
|
||||||
tests = self.expected_diagnostics.tests
|
expected_diagnostics_per_file = self.expected_diagnostics.tests
|
||||||
|
|
||||||
# Add our own test diagnostics if they didn't exist
|
# Add our own test diagnostics if they didn't exist
|
||||||
if self.test_name not in tests:
|
if self.test_name not in expected_diagnostics_per_file:
|
||||||
tests[self.test_name] = []
|
expected_diagnostics_per_file[self.test_name] = []
|
||||||
|
|
||||||
published_diagnostics = \
|
published_diagnostics = \
|
||||||
self.suite.open_file_and_wait_for_diagnostics(self.solc, self.test_name, self.sub_dir)
|
self.suite.open_file_and_wait_for_diagnostics(self.solc, self.test_name, self.sub_dir)
|
||||||
@ -551,25 +561,27 @@ class FileTestRunner:
|
|||||||
raise Exception(
|
raise Exception(
|
||||||
f"'{self.test_name}.sol' imported file outside of test directory: '{diagnostics['uri']}'"
|
f"'{self.test_name}.sol' imported file outside of test directory: '{diagnostics['uri']}'"
|
||||||
)
|
)
|
||||||
self.open_tests.append(diagnostics["uri"].replace(self.suite.project_root_uri + "/", "")[:-len(".sol")])
|
self.open_tests.append(self.suite.normalizeUri(diagnostics["uri"]))
|
||||||
|
|
||||||
self.suite.expect_equal(
|
self.suite.expect_equal(
|
||||||
len(published_diagnostics),
|
len(published_diagnostics),
|
||||||
len(tests),
|
len(expected_diagnostics_per_file),
|
||||||
description="Amount of reports does not match!")
|
description="Amount of reports does not match!")
|
||||||
|
|
||||||
for diagnostics in published_diagnostics:
|
for diagnostics_per_file in published_diagnostics:
|
||||||
testname_and_subdir = diagnostics["uri"].replace(self.suite.project_root_uri + "/", "")[:-len(".sol")]
|
testname, sub_dir = split_path(self.suite.normalizeUri(diagnostics_per_file['uri']))
|
||||||
testname, sub_dir = split_path(testname_and_subdir)
|
|
||||||
|
# Clear all processed expectations so we can check at the end
|
||||||
|
# what's missing
|
||||||
|
expected_diagnostics = expected_diagnostics_per_file.pop(testname, {})
|
||||||
|
|
||||||
expected_diagnostics = tests[testname]
|
|
||||||
self.suite.expect_equal(
|
self.suite.expect_equal(
|
||||||
len(diagnostics["diagnostics"]),
|
len(diagnostics_per_file["diagnostics"]),
|
||||||
len(expected_diagnostics),
|
len(expected_diagnostics),
|
||||||
description="Unexpected amount of diagnostics"
|
description="Unexpected amount of diagnostics"
|
||||||
)
|
)
|
||||||
markers = self.suite.get_file_tags(testname, sub_dir)
|
markers = self.suite.get_file_tags(testname, sub_dir)
|
||||||
for actual_diagnostic in diagnostics["diagnostics"]:
|
for actual_diagnostic in diagnostics_per_file["diagnostics"]:
|
||||||
expected_diagnostic = next((diagnostic for diagnostic in
|
expected_diagnostic = next((diagnostic for diagnostic in
|
||||||
expected_diagnostics if actual_diagnostic['range'] ==
|
expected_diagnostics if actual_diagnostic['range'] ==
|
||||||
markers[diagnostic.marker]), None)
|
markers[diagnostic.marker]), None)
|
||||||
@ -586,6 +598,12 @@ class FileTestRunner:
|
|||||||
marker=markers[expected_diagnostic.marker]
|
marker=markers[expected_diagnostic.marker]
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if len(expected_diagnostics_per_file) > 0:
|
||||||
|
raise ExpectationFailed(
|
||||||
|
f"Expected diagnostics but received none for {expected_diagnostics_per_file}",
|
||||||
|
ExpectationFailed.Part.Diagnostics
|
||||||
|
)
|
||||||
|
|
||||||
except Exception:
|
except Exception:
|
||||||
self.close_all_open_files()
|
self.close_all_open_files()
|
||||||
raise
|
raise
|
||||||
@ -759,6 +777,7 @@ class SolidityLSPTestSuite: # {{{
|
|||||||
self.trace_io = args.trace_io
|
self.trace_io = args.trace_io
|
||||||
self.test_pattern = args.test_pattern
|
self.test_pattern = args.test_pattern
|
||||||
self.fail_fast = args.fail_fast
|
self.fail_fast = args.fail_fast
|
||||||
|
self.non_interactive = args.non_interactive
|
||||||
|
|
||||||
print(f"{SGR_NOTICE}test pattern: {self.test_pattern}{SGR_RESET}")
|
print(f"{SGR_NOTICE}test pattern: {self.test_pattern}{SGR_RESET}")
|
||||||
|
|
||||||
@ -773,6 +792,9 @@ class SolidityLSPTestSuite: # {{{
|
|||||||
if callable(getattr(SolidityLSPTestSuite, name)) and name.startswith("test_")
|
if callable(getattr(SolidityLSPTestSuite, name)) and name.startswith("test_")
|
||||||
])
|
])
|
||||||
filtered_tests = fnmatch.filter(all_tests, self.test_pattern)
|
filtered_tests = fnmatch.filter(all_tests, self.test_pattern)
|
||||||
|
if filtered_tests.count("generic") == 0:
|
||||||
|
filtered_tests.append("generic")
|
||||||
|
|
||||||
for method_name in filtered_tests:
|
for method_name in filtered_tests:
|
||||||
test_fn = getattr(self, 'test_' + method_name)
|
test_fn = getattr(self, 'test_' + method_name)
|
||||||
title: str = test_fn.__name__[5:]
|
title: str = test_fn.__name__[5:]
|
||||||
@ -889,22 +911,25 @@ class SolidityLSPTestSuite: # {{{
|
|||||||
|
|
||||||
return sorted(reports, key=lambda x: x['uri'])
|
return sorted(reports, key=lambda x: x['uri'])
|
||||||
|
|
||||||
|
def normalizeUri(self, uri):
|
||||||
|
return uri.replace(self.project_root_uri + "/", "")[:-len(".sol")]
|
||||||
|
|
||||||
def fetch_and_format_diagnostics(self, solc: JsonRpcProcess, test, sub_dir=None):
|
def fetch_and_format_diagnostics(self, solc: JsonRpcProcess, test, sub_dir=None):
|
||||||
expectations = ""
|
expectations = ""
|
||||||
|
|
||||||
published_diagnostics = self.open_file_and_wait_for_diagnostics(solc, test, sub_dir)
|
published_diagnostics = self.open_file_and_wait_for_diagnostics(solc, test, sub_dir)
|
||||||
|
|
||||||
for diagnostics in published_diagnostics:
|
for file_diagnostics in published_diagnostics:
|
||||||
testname = diagnostics["uri"].replace(f"{self.project_root_uri}/{sub_dir}/", "")[:-len(".sol")]
|
testname, local_sub_dir = split_path(self.normalizeUri(file_diagnostics["uri"]))
|
||||||
|
|
||||||
# Skip empty diagnostics within the same file
|
# Skip empty diagnostics within the same file
|
||||||
if len(diagnostics["diagnostics"]) == 0 and testname == test:
|
if len(file_diagnostics["diagnostics"]) == 0 and testname == test:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
expectations += f"// {testname}:"
|
expectations += f"// {testname}:"
|
||||||
|
|
||||||
for diagnostic in diagnostics["diagnostics"]:
|
for diagnostic in file_diagnostics["diagnostics"]:
|
||||||
tag = self.find_tag_with_range(testname, sub_dir, diagnostic['range'])
|
tag = self.find_tag_with_range(testname, local_sub_dir, diagnostic['range'])
|
||||||
|
|
||||||
if tag is None:
|
if tag is None:
|
||||||
raise Exception(f"No tag found for diagnostic range {diagnostic['range']}")
|
raise Exception(f"No tag found for diagnostic range {diagnostic['range']}")
|
||||||
@ -1121,6 +1146,11 @@ class SolidityLSPTestSuite: # {{{
|
|||||||
Asks the user how to proceed after an error.
|
Asks the user how to proceed after an error.
|
||||||
Returns True if the test/file should be ignored, otherwise False
|
Returns True if the test/file should be ignored, otherwise False
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# Prevent user interaction when in non-interactive mode
|
||||||
|
if self.non_interactive:
|
||||||
|
return False
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
print("(u)pdate/(r)etry/(s)kip file?")
|
print("(u)pdate/(r)etry/(s)kip file?")
|
||||||
user_response = getCharFromStdin()
|
user_response = getCharFromStdin()
|
||||||
@ -1324,10 +1354,15 @@ class SolidityLSPTestSuite: # {{{
|
|||||||
|
|
||||||
for sub_dir in map(lambda filepath: filepath.name, sub_dirs):
|
for sub_dir in map(lambda filepath: filepath.name, sub_dirs):
|
||||||
tests = map(
|
tests = map(
|
||||||
lambda filename: filename[:-len(".sol")],
|
lambda filename, sd=sub_dir: sd + "/" + filename[:-len(".sol")],
|
||||||
os.listdir(f"{self.project_root_dir}/{sub_dir}")
|
os.listdir(f"{self.project_root_dir}/{sub_dir}")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
tests = map(
|
||||||
|
lambda path, sd=sub_dir: path[len(sd)+1:],
|
||||||
|
fnmatch.filter(tests, self.test_pattern)
|
||||||
|
)
|
||||||
|
|
||||||
print(f"Running tests in subdirectory '{sub_dir}'...")
|
print(f"Running tests in subdirectory '{sub_dir}'...")
|
||||||
for test in tests:
|
for test in tests:
|
||||||
try_again = True
|
try_again = True
|
||||||
|
Loading…
Reference in New Issue
Block a user