Commit 529180eb authored by Johannes Bechberger's avatar Johannes Bechberger

Implement `exec` and `exec-firm` as aliases

Fix #5
parent 9ed9c077
...@@ -24,9 +24,9 @@ The test cases are divided in 6 'modes': ...@@ -24,9 +24,9 @@ The test cases are divided in 6 'modes':
- __ast__: Test cases that check the generated ast by using the pretty printing functionality. - __ast__: Test cases that check the generated ast by using the pretty printing functionality.
- __semantic__: Test cases that check semantic checking of MiniJava programs - __semantic__: Test cases that check semantic checking of MiniJava programs
- __compile-firm-only__: Test cases that check the compilation of MiniJava programs with the libfirm backend. - __compile-firm-only__: Test cases that check the compilation of MiniJava programs with the libfirm backend.
- __compile-firm__: Test cases that check the correct compilation and execution of MiniJava programs with the libfirm backend. - __compile-firm__ (alias `exec-firm`): Test cases that check the correct compilation and execution of MiniJava programs with the libfirm backend.
- __compile-only__: Test cases that check the compilation MiniJava programs with the self implemented backend. - __compile-only__: Test cases that check the compilation MiniJava programs with the self implemented backend.
- __compile__: Test cases that check the correct compilation and execution of MiniJava programs with the self implemented backend. - __compile__ (alias `exec`): Test cases that check the correct compilation and execution of MiniJava programs with the self implemented backend.
The different test cases for each mode are located in a folder with the same name, except the compile-firm test cases, which are located in the `exec` folder.
...@@ -187,12 +187,12 @@ usage: mjt.py [-h] [--only_incorrect_tests] [--all_exec_tests] ...@@ -187,12 +187,12 @@ usage: mjt.py [-h] [--only_incorrect_tests] [--all_exec_tests]
[--output_no_incorrect_reports] [--color] [--ci_testing] [--output_no_incorrect_reports] [--color] [--ci_testing]
[--log_level LOG_LEVEL] [--log_level LOG_LEVEL]
{all,lexer,syntax,ast,semantic,compile-firm-only,compile-only, {all,lexer,syntax,ast,semantic,compile-firm-only,compile-only,
compile-firm,compile} MJ_RUN compile-firm,compile,exec,exec-firm} MJ_RUN
MiniJava test runner MiniJava test runner
positional arguments: positional arguments:
{all,lexer,syntax,ast,semantic,compile-firm-only,compile-only,compile-firm,exec} {all,lexer,syntax,ast,semantic,compile-firm-only,compile-only,compile-firm,exec,exec-firm}
What do you want to test? What do you want to test?
MJ_RUN Command to run your MiniJava implementation, e.g. MJ_RUN Command to run your MiniJava implementation, e.g.
`mj/run`, can be omitted by assigning the environment `mj/run`, can be omitted by assigning the environment
......
...@@ -27,7 +27,7 @@ class LogLevelChoices(argparse.Action): ...@@ -27,7 +27,7 @@ class LogLevelChoices(argparse.Action):
if True:#__name__ == '__main__': if True:#__name__ == '__main__':
parser = argparse.ArgumentParser(description="MiniJava test runner", add_help=True) parser = argparse.ArgumentParser(description="MiniJava test runner", add_help=True)
parser.add_argument("mode", parser.add_argument("mode",
choices=["all"] + TEST_MODES, choices=["all"] + TEST_MODES + ["exec-firm"],
help="What do you want to test?") help="What do you want to test?")
if os.getenv("MJ_RUN", None) is None: if os.getenv("MJ_RUN", None) is None:
parser.add_argument("mj_run", parser.add_argument("mj_run",
...@@ -84,6 +84,12 @@ if True:#__name__ == '__main__': ...@@ -84,6 +84,12 @@ if True:#__name__ == '__main__':
if ret is not None: if ret is not None:
failed += ret.failed failed += ret.failed
count += ret.count count += ret.count
# Resolve the user-facing alias modes `exec` / `exec-firm` to their
# canonical counterparts `compile` / `compile-firm` before dispatching,
# so the rest of the runner only ever sees canonical mode names.
# NOTE(review): the argparse `choices` above adds only "exec-firm"
# explicitly — presumably "exec" is already present in TEST_MODES
# (the compile-firm tests live in the `exec` folder); confirm that
# "exec" is actually accepted by the parser.
if args["mode"] in ["exec", "exec-firm"]:
    # str.replace maps "exec" -> "compile" and "exec-firm" -> "compile-firm".
    new_mode = args["mode"].replace("exec", "compile")
    # Warn (in yellow) that the alias was substituted, so log output
    # matches the mode the runner actually executes.
    print(colored("\"{}\" is just an alias for \"{}\", using the latter instead"
                  .format(args["mode"], new_mode),
                  "yellow"))
    args["mode"] = new_mode
if args["mode"] == "all": if args["mode"] == "all":
report_subdir = datetime.now().strftime("%d-%m-%y_%H-%M-%S") report_subdir = datetime.now().strftime("%d-%m-%y_%H-%M-%S")
for mode in [TestMode.lexer, TestMode.syntax, TestMode.ast, TestMode.semantic, for mode in [TestMode.lexer, TestMode.syntax, TestMode.ast, TestMode.semantic,
......
...@@ -49,8 +49,8 @@ class BenchExecTest(BasicSyntaxTest): ...@@ -49,8 +49,8 @@ class BenchExecTest(BasicSyntaxTest):
INVALID_FILE_ENDINGS = [".inf.java", ".inf.mj"] INVALID_FILE_ENDINGS = [".inf.java", ".inf.mj"]
MODE = TestMode.compile_firm MODE = TestMode.compile_firm
def __init__(self, env: Environment, type: str, file: str, preprocessed_file: str): def __init__(self, env: Environment, type: str, file: str, preprocessed_file: str, log_file_mode: str = ""):
super().__init__(env, type, file, preprocessed_file) super().__init__(env, type, file, preprocessed_file, log_file_mode)
self._should_succeed = True self._should_succeed = True
def _bench_command(self, cmd: str, *args: Tuple[str]) -> _RunResult: def _bench_command(self, cmd: str, *args: Tuple[str]) -> _RunResult:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment