Commit fec32835 authored by Johannes Bechberger

Add asts test runner

parent 00d02cb3
@@ -16,7 +16,7 @@ The test cases are divided in 5 'modes':

- __lexer__: Test cases that check the lexed tokens (and their correct output)
- __syntax__: Test cases that just check whether `./run --parsecheck` accepts them as correct or rejects them.
- __ast__: Test cases that check the generated AST by using the pretty printing functionality.
- __semantic__: Test cases that check semantic checking of MiniJava programs
- __exec__: Test cases that check the correct compilation of MiniJava programs.

@@ -74,14 +74,13 @@ Test types for the ast mode

<tr><th>File ending(s) of test cases</th><th>Expected behaviour to complete a test of this type</th></tr>
<tr>
<td><code>.valid.mj</code> <code>.mj</code>
<td>Pretty printing the source file should result in the same output as pretty printing the already pretty printed file.
The sorted lexer output of the latter should be the same as the sorted lexer output of the source file.</td>
</tr>
</table>

It uses all syntax mode tests implicitly.
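In other words, an ast test passes when pretty printing is idempotent and the token stream survives the round trip. A minimal sketch of the check, where `pretty_print` and `lex` are hypothetical stand-ins for running `./run --pretty-print` and `./run --lextest` on a source string:

```python
# Sketch of the ast-mode check; pretty_print/lex are hypothetical callables
# that run the compiler on a source string and return its output.
def check_pretty_print(source: str, pretty_print, lex) -> bool:
    once = pretty_print(source)     # pretty print the source file
    twice = pretty_print(once)      # pretty print the pretty printed output
    if once != twice:               # the pretty printer must be idempotent
        return False
    # the sorted token stream must be unchanged by the round trip
    sort_lines = lambda s: "".join(sorted(s.splitlines(True)))
    return sort_lines(lex(source)) == sort_lines(lex(twice))
```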
Test runner
-----------

...
@@ -20,6 +20,10 @@ class TestMode:

    exec = "exec"

    USE_TESTS_OF_OTHER = {
        ast: [syntax]
    }

""" All 'success' tests of the n-th mode can be used as 'success' tests for the (n-1)-th mode """

TEST_MODES = [TestMode.lexer, TestMode.syntax, TestMode.ast, TestMode.semantic, TestMode.exec]

@@ -102,7 +106,7 @@ class Environment:

        mode_flag = {
            TestMode.lexer: "--lextest",
            TestMode.syntax: "--parsetest",
            TestMode.ast: "--pretty-print"
        }[mode]
        cmd = [self.mj_run_cmd, mode_flag] + list(args)
        return execute(cmd, timeout=self.timeout)
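For illustration only (not mjtest code): with the mapping above, an ast-mode test now drives the compiler's pretty printer. Assuming the run script is `./run` as in the README and a made-up test file name, the assembled command looks roughly like this:

```python
# Rough illustration of the command built by run_mj_command for an ast test.
mode_flag = {"lexer": "--lextest", "syntax": "--parsetest", "ast": "--pretty-print"}
cmd = ["./run", mode_flag["ast"], "tests/ast/Example.mj"]  # hypothetical test file
print(" ".join(cmd))  # ./run --pretty-print tests/ast/Example.mj
```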
...

import difflib
import os
import shutil, logging
from typing import Tuple

from mjtest.environment import Environment, TestMode
from mjtest.test.syntax_tests import BasicSyntaxTest
from mjtest.test.tests import TestCase, BasicDiffTestResult, BasicTestResult
from os import path

_LOG = logging.getLogger("tests")


class ASTPrettyPrintTest(BasicSyntaxTest):

    FILE_ENDINGS = [".mj", ".valid.mj"]
    INVALID_FILE_ENDINGS = [".invalid.mj"]

    def __init__(self, env: Environment, type: str, file: str):
        super().__init__(env, type, file)

    def run(self) -> BasicTestResult:
        # first pass: pretty print the source file into a temporary file
        tmp_file = self.env.create_tmpfile()
        rtcode, out, err = self._pretty_print(self.file, tmp_file)
        if rtcode > 0:
            os.remove(tmp_file)
            return BasicTestResult(self, rtcode, out, err)
        # second pass: pretty print the pretty printed output (idempotence check)
        tmp_file2 = self.env.create_tmpfile()
        rtcode, out2, err2 = self._pretty_print(tmp_file, tmp_file2)
        if rtcode > 0:
            os.remove(tmp_file2)
            btr = BasicTestResult(self, rtcode, out2, err2)
            btr.add_additional_text("Prior out", out)
            btr.add_additional_text("Prior err", err)
            return btr
        # compare the sorted lexer output of the source file and of the second pretty print
        out_lex, err_lex, rtcode_lex = self.env.run_mj_command(TestMode.lexer, self.file)
        out_lex2, err_lex2, rtcode_lex2 = self.env.run_mj_command(TestMode.lexer, tmp_file2)
        os.remove(tmp_file2)
        out_lex = self._sort_lexed(out_lex.decode())
        out_lex2 = self._sort_lexed(out_lex2.decode())
        incorrect_msg, rtcode = "", 0
        if rtcode_lex + rtcode_lex2:
            incorrect_msg, rtcode = "Lexing failed", 1
        elif out != out2:
            incorrect_msg, rtcode = "Not idempotent", 1
        elif out_lex != out_lex2:
            incorrect_msg, rtcode = "Sorted and lexed second pretty print differs from original", 1
        btr = BasicTestResult(self, rtcode, incorrect_msg=incorrect_msg)
        btr.add_additional_text("First round output", out)
        btr.add_additional_text("Second round output", out2)
        btr.add_additional_text("Diff", self._diff(out, out2))
        btr.add_additional_text("Original file, sorted and lexed", out_lex)
        btr.add_additional_text("Second round output, sorted and lexed", out_lex2)
        btr.add_additional_text("Diff", self._diff(out_lex, out_lex2))
        return btr

    def _diff(self, first: str, second: str) -> str:
        return "".join(difflib.Differ().compare(first.splitlines(True), second.splitlines(True)))

    def _sort_lexed(self, lexed: str) -> str:
        return "".join(sorted(lexed.splitlines(True)))

    def _pretty_print(self, input_file: str, output_file: str) -> Tuple[int, str, str]:
        out, err, rtcode = self.env.run_mj_command(TestMode.ast, input_file)
        with open(output_file, "w") as f:
            f.write(out.decode())  # out is raw bytes from the compiler run
        return rtcode, out.decode(), err.decode()


TestCase.TEST_CASE_CLASSES["ast"].append(ASTPrettyPrintTest)
@@ -31,7 +31,11 @@ class TestSuite:

        self._load_test_cases()

    def _load_test_cases(self):
        types = [self.env.mode]
        if self.env.ci_testing:
            types = TEST_MODES
        elif self.env.mode in TestMode.USE_TESTS_OF_OTHER:
            types += TestMode.USE_TESTS_OF_OTHER[self.env.mode]
        for type in types:
            self._load_test_case_type(type)
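A standalone illustration (plain strings instead of `TestMode` attributes) of how the new `USE_TESTS_OF_OTHER` lookup affects the selected test types:

```python
# Standalone sketch of the test type selection above.
TEST_MODES = ["lexer", "syntax", "ast", "semantic", "exec"]
USE_TESTS_OF_OTHER = {"ast": ["syntax"]}

def selected_types(mode: str, ci_testing: bool) -> list:
    types = [mode]
    if ci_testing:
        types = TEST_MODES
    elif mode in USE_TESTS_OF_OTHER:
        types += USE_TESTS_OF_OTHER[mode]
    return types

print(selected_types("ast", False))    # ['ast', 'syntax'] -- syntax tests are reused
print(selected_types("lexer", False))  # ['lexer']
```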
@@ -54,21 +58,27 @@ class TestSuite:

                if len(t) > 0:
                    self.correct_test_cases[mode].add(t)
                    correct_test_cases.add(t)
        m = mode
        if m != self.env.mode and self.env.mode in TestMode.USE_TESTS_OF_OTHER and \
                m in TestMode.USE_TESTS_OF_OTHER[self.env.mode]:
            m = self.env.mode
        for file in sorted(os.listdir(dir)):
            if not TestCase.has_valid_file_ending(m, file):
                _LOG.debug("Skip file " + file)
            elif self.env.only_incorrect_tests and file in correct_test_cases:
                _LOG.info("Skip file {} as its test case was executed correctly the last run")
            else:
                test_case = TestCase.create_from_file(self.env, m, join(dir, file))
                if not test_case:
                    pass
                elif not test_case.can_run():
                    _LOG.debug("Skip test case '{}' because it isn't suited".format(test_case.name()))
                else:
                    if m not in self.test_cases:
                        self.test_cases[m] = []
                    self.test_cases[m].append(test_case)
        if m in self.test_cases and len(self.test_cases[m]) == 0:
            del self.test_cases[m]

    def _log_file_for_type(self, type: str):
        return join(self.env.test_dir, type, ".mjtest_correct_testcases_" + self.env.mode)
@@ -185,6 +195,7 @@ class TestCase:

    TEST_CASE_CLASSES = dict((k, []) for k in TEST_MODES)

    FILE_ENDINGS = []
    INVALID_FILE_ENDINGS = []

    def __init__(self, env: Environment, type: str, file: str):
        self.env = env
@@ -196,13 +207,14 @@ class TestCase:

    def can_run(self, mode: str = "") -> bool:
        mode = mode or self.env.mode
        same_mode = self.type == mode
        types = TEST_MODES[TEST_MODES.index(self.env.mode):]
        if self.env.ci_testing:
            return same_mode or \
                   (self.type in types and self.should_succeed()) or \
                   (self.type not in types and not self.should_succeed())
        else:
            return same_mode

    def run(self) -> 'TestResult':
        raise NotImplementedError()
@@ -222,7 +234,8 @@ class TestCase:

    @classmethod
    def _test_case_class_for_file(cls, type: str, file: str):
        for t in cls.TEST_CASE_CLASSES[type]:
            if any(file.endswith(e) for e in t.FILE_ENDINGS) and \
                    not any(file.endswith(e) for e in t.INVALID_FILE_ENDINGS):
                return t
        return False
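The new `INVALID_FILE_ENDINGS` check matters because `.invalid.mj` also ends with `.mj`, so `FILE_ENDINGS` alone cannot keep invalid test files away from `ASTPrettyPrintTest`. A small illustration (the file name is made up):

```python
# Why the second condition is needed: ".invalid.mj" also ends with ".mj".
FILE_ENDINGS = [".mj", ".valid.mj"]          # as in ASTPrettyPrintTest
INVALID_FILE_ENDINGS = [".invalid.mj"]

file = "Broken.invalid.mj"                   # hypothetical file name
matches = any(file.endswith(e) for e in FILE_ENDINGS) and \
          not any(file.endswith(e) for e in INVALID_FILE_ENDINGS)
print(matches)  # False -- the invalid test case no longer matches this class
```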
@@ -257,12 +270,18 @@ class TestResult:


class BasicTestResult(TestResult):

    def __init__(self, test_case: TestCase, error_code: int, output: str = None, error_output: str = None,
                 incorrect_msg: str = "incorrect return code"):
        super().__init__(test_case, error_code)
        self._incorrect_msg = incorrect_msg
        # error_output may now be None, so guard the membership test
        self._contains_error_str = error_output is not None and "error" in error_output
        self.error_output = error_output
        self.output = output
        self.other_texts = []  # type: List[Tuple[str, str, bool]]
        if output:
            self.add_additional_text("Output", output)
        if error_output:
            self.add_additional_text("Error output", error_output)

    def is_correct(self):
        if self.succeeded():
@@ -276,7 +295,7 @@ class BasicTestResult(TestResult):

        else:
            if not self.succeeded() and not self.test_case.should_succeed() and not self._contains_error_str:
                return "the error output doesn't contain the word \"error\""
            return self._incorrect_msg

    def long_message(self) -> str:
        file_content = []
@@ -299,19 +318,10 @@ Source file:

{}

Return code: {}

{}
""".format(self.short_message().capitalize(), self._ident(file_content), self.error_code,
           "\n".join(others))

    def add_additional_text(self, title: str, content: str):
@@ -364,6 +374,43 @@ class BasicDiffTestResult(BasicTestResult):

        return "incorrect return code"


class DiffTest(TestCase):

    FILE_ENDINGS = [".invalid.mj", ".valid.mj", ".mj"]
    OUTPUT_FILE_ENDING = ".out"
    MODE = TestMode.ast

    def __init__(self, env: Environment, type: str, file: str):
        super().__init__(env, type, file)
        self._should_succeed = not file.endswith(".invalid.mj")
        self._expected_output_file = file + self.OUTPUT_FILE_ENDING
        self._has_expected_output_file = exists(self._expected_output_file)

    def should_succeed(self) -> bool:
        return self._should_succeed

    def short_name(self) -> str:
        return basename(self.file)

    def run(self) -> BasicDiffTestResult:
        out, err, rtcode = self.env.run_mj_command(self.MODE, self.file)
        exp_out = ""
        if rtcode == 0 and self.should_succeed():
            if self._has_expected_output_file and self.type == self.MODE and self.env.mode == self.MODE:
                with open(self._expected_output_file, "r") as f:
                    exp_out = f.read()
            # else:
            #     _LOG.error("Expected output file for test case {}:{} is missing.".format(self.MODE, self.short_name()))
        if self.type == self.MODE and self.env.mode == self.MODE:
            return BasicDiffTestResult(self, rtcode, out.decode(), err.decode(), exp_out)
        return BasicTestResult(self, rtcode, out.decode(), err.decode())


class LexerDiffTest(DiffTest):

    MODE = TestMode.lexer


TestCase.TEST_CASE_CLASSES[TestMode.lexer].append(LexerDiffTest)

import mjtest.test.syntax_tests
import mjtest.test.ast_tests