@@ -185,9 +185,10 @@ Output of the `./mjt.py --help`
 usage: mjt.py [-h] [--only_incorrect_tests] [--all_exec_tests]
               [--produce_no_reports] [--produce_all_reports] [--parallel]
               [--output_no_incorrect_reports] [--color] [--ci_testing]
-              [--log_level LOG_LEVEL]
+              [--log_level LOG_LEVEL] [--bench_compiler_flag_1]
+              [--bench_compiler_flag_2]
               {all,lexer,syntax,ast,semantic,compile-firm-only,compile-only,
-              compile-firm,compile,exec,exec-firm} MJ_RUN
+              compile-firm,compile,exec,exec-firm,bench} MJ_RUN
 MiniJava test runner
@@ -217,6 +218,14 @@ optional arguments:
                         modes/phases should also succeed in this mode, and
                         failing test cases of prior modes/phases should also
                         fail in this phase.
+  --bench_compiler_flag_1 BENCH_COMPILER_FLAG_1
+                        Set the first compiler flag/mode that is used for
+                        comparison, 'javac' for the java compiler
+  --bench_compiler_flag_2 BENCH_COMPILER_FLAG_2
+                        Set the second compiler flag/mode that is used for
+                        comparison, 'javac' for the java compiler
+  --bench_runs BENCH_RUNS
+                        Number of times to run a benchmarked code
   --log_level LOG_LEVEL
                         Logging level (error, warn, info or debug)
 ```
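With the new `bench` mode and the two compiler-flag options in place, a comparison run could be started along the lines of `./mjt.py --bench_compiler_flag_1 compile-firm --bench_compiler_flag_2 javac --bench_runs 20 bench path/to/mj_run`; the flag values, run count, and MJ_RUN path here are placeholders for illustration, not values taken from this patch.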
@@ -55,8 +55,12 @@ if True:#__name__ == '__main__':
     parser.add_argument("--ci_testing", action="store_true", default=False,
                         help="In mode X the succeeding test cases of later modes/phases should also succeed in "
                              "this mode, and failing test cases of prior modes/phases should also fail in this phase.")
-    parser.add_argument("--bench_compiler_flags", action="store", default="", type=str, nargs=2,
-                        help="Set the different compiler flags/modes that are compared, 'javac' for the java compiler")
+    parser.add_argument("--bench_compiler_flag_1", action="store", default="", type=str,
+                        help="Set the first compiler flag/mode that is used for comparison, "
+                             "'javac' for the java compiler")
+    parser.add_argument("--bench_compiler_flag_2", action="store", default="", type=str,
+                        help="Set the second compiler flag/mode that is used for comparison, "
+                             "'javac' for the java compiler")
     parser.add_argument("--bench_runs", action="store", type=int, default=10,
                         help="Number of times to run a benchmarked code")
     #parser.add_argument("--timeout", action="store_const", default=30, const="timeout",
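Splitting the former nargs=2 option `--bench_compiler_flags` into two independent string options means each side of the comparison can be set, or left at its empty default, on its own. The stand-alone sketch below reproduces only the new add_argument calls to illustrate that parsing behaviour; the example values 'compile-firm' and 'javac' are placeholders, not taken from the patch.

```python
import argparse

# Stand-alone reproduction of the new benchmark options (illustration only,
# outside of mjt.py); mirrors the add_argument calls from the diff above.
parser = argparse.ArgumentParser(prog="mjt.py")
parser.add_argument("--bench_compiler_flag_1", action="store", default="", type=str,
                    help="Set the first compiler flag/mode that is used for comparison, "
                         "'javac' for the java compiler")
parser.add_argument("--bench_compiler_flag_2", action="store", default="", type=str,
                    help="Set the second compiler flag/mode that is used for comparison, "
                         "'javac' for the java compiler")
parser.add_argument("--bench_runs", action="store", type=int, default=10,
                    help="Number of times to run a benchmarked code")

# Placeholder values: each flag parses independently and falls back to "" when omitted.
args = parser.parse_args(["--bench_compiler_flag_1", "compile-firm",
                          "--bench_compiler_flag_2", "javac"])
assert args.bench_compiler_flag_1 == "compile-firm"
assert args.bench_compiler_flag_2 == "javac"
assert args.bench_runs == 10  # default is kept when --bench_runs is not given
```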
@@ -80,7 +80,8 @@ class Environment:
                  produce_no_reports: bool = True, output_no_incorrect_reports: bool = False,
                  produce_all_reports: bool = False, report_subdir: str = None,
                  ci_testing: bool = False, color: bool = False,
-                 all_exec_tests: bool = True, bench_compiler_flags: List[str] = [],
+                 all_exec_tests: bool = True, bench_compiler_flag_1: str = "",
+                 bench_compiler_flag_2: str = "",
                  bench_runs: int = 10):
         if color:
             force_colored_output()
@@ -134,7 +135,7 @@ class Environment:
         self.timeout = float(os.getenv("MJ_TIMEOUT", "10"))
         self.big_timeout = float(os.getenv("MJ_BIG_TIMEOUT", "60"))
-        self.bench_compiler_flags = bench_compiler_flags
+        self.bench_compiler_flags = [bench_compiler_flag_1, bench_compiler_flag_2]
         self.bench_runs = bench_runs

     def create_tmpfile(self) -> str:
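The Environment still exposes a two-element bench_compiler_flags list, so code that iterates over the flags keeps working unchanged. As a rough sketch of how a consumer might use bench_compiler_flags together with bench_runs (the run_compiled callback and the min-of-runs timing policy are assumptions for illustration, not code from this repository):

```python
import time
from typing import Callable, Dict, List

def bench_flags(flags: List[str], runs: int,
                run_compiled: Callable[[str], None]) -> Dict[str, float]:
    """Hypothetical sketch: time `runs` executions per compiler flag/mode and
    keep the best (minimum) wall-clock time for each flag."""
    results: Dict[str, float] = {}
    for flag in flags:                        # e.g. Environment.bench_compiler_flags
        timings = []
        for _ in range(runs):                 # e.g. Environment.bench_runs
            start = time.monotonic()
            run_compiled(flag)                # placeholder: run the program built with `flag`
            timings.append(time.monotonic() - start)
        results[flag] = min(timings)          # minimum is the least noisy summary here
    return results

# Possible call site (names assumed): bench_flags(env.bench_compiler_flags, env.bench_runs, run)
```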