Commit 8e19d593 authored by Johannes Bechberger

Fix "only_incorrect_testcases" bug

And add "--produce_no_reports" option
parent 4a370c9f
@@ -68,8 +68,8 @@ To get colored output install the python3 module `termcolor`.
 Output of the `./mjt.py --help`
 ```
-usage: mjt.py [-h] [--only_incorrect_tests] [--parallel]
-              [--log_level LOG_LEVEL]
+usage: mjt.py [-h] [--only_incorrect_tests] [--produce_no_reports]
+              [--parallel] [--log_level LOG_LEVEL]
               {syntax,semantic,exec} MJ_RUN
 MiniJava test runner
@@ -85,6 +85,8 @@ optional arguments:
   -h, --help            show this help message and exit
   --only_incorrect_tests
                         Only run the tests that were incorrect the last run
+  --produce_no_reports  Produce no long reports besides the command line
+                        output
   --parallel            Run the tests in parallel
   --log LOG             Logging level (error, warn, info or debug)
 ```
...
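For illustration, the new flag combines naturally with the existing one; a possible invocation (the `MJ_RUN` path is a placeholder, not a path from the repository):

```
# rerun only the previously failing syntax tests, without writing report files
./mjt.py syntax path/to/mj_run --only_incorrect_tests --produce_no_reports
```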
@@ -36,6 +36,8 @@ if True:#__name__ == '__main__':
     # help="Directory that contains all test cases, default is the 'tests' directory")
     parser.add_argument("--only_incorrect_tests", action="store_true", default=False,
                         help="Only run the tests that were incorrect the last run")
+    parser.add_argument("--produce_no_reports", action="store_true", default=False,
+                        help="Produce no long reports besides the command line output")
     parser.add_argument("--parallel", action="store_true", default=False,
                         help="Run the tests in parallel")
     #parser.add_argument("--timeout", action="store_const", default=30, const="timeout",
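As a minimal sketch of the parsing behaviour (a stand-alone parser, not the project's full argument setup), a `store_true` flag defaults to `False` and flips to `True` only when passed:

```
import argparse

parser = argparse.ArgumentParser(description="MiniJava test runner")
parser.add_argument("--produce_no_reports", action="store_true", default=False,
                    help="Produce no long reports besides the command line output")

assert parser.parse_args([]).produce_no_reports is False
assert parser.parse_args(["--produce_no_reports"]).produce_no_reports is True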
@@ -54,7 +56,8 @@ if True:#__name__ == '__main__':
     finally:
         suite.env.clean_up()
         suite.store()
-    if ret is None or ret:
+    if ret is None or ret.failed > 0:
         sys.exit(1)
     else:
         sys.exit(0)
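The old check `if ret is None or ret:` exited with status 1 whenever a result object existed at all, because a non-empty result tuple is always truthy; the corrected check inspects the failure count. A small sketch, assuming `RunResult` is a namedtuple with `count` and `failed` fields (suggested by `RunResult(0, 0)` and the `ret.count`/`ret.failed` accesses elsewhere in this diff, but not shown verbatim):

```
from collections import namedtuple

RunResult = namedtuple("RunResult", ["count", "failed"])  # assumed field layout

ret = RunResult(count=10, failed=0)  # all tests passed
print(bool(ret))        # True  -> the old check would still exit(1)
print(ret.failed > 0)   # False -> the new check correctly signals success
```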
@@ -29,7 +29,8 @@ class Environment:
     def __init__(self, mode, mj_run: str, tmp_dir: str = "", test_dir: str = "",
                  only_incorrect_tests: bool = False, parallel: bool = False,
-                 timeout: int = 30, report_dir: str = "", log_level: str = "warn"):
+                 timeout: int = 30, report_dir: str = "", log_level: str = "warn",
+                 produce_no_reports: bool = True):
         self.mode = mode
         self.mj_run_cmd = os.path.realpath(mj_run)
@@ -54,18 +55,21 @@ class Environment:
         self.only_incorrect_tests = only_incorrect_tests
         self.parallel = parallel
         self.timeout = timeout
-        if report_dir:
-            self.report_dir = os.path.abspath(os.path.expandvars(report_dir))
-            if not os.path.exists(report_dir):
-                os.mkdir(self.report_dir)
+        if not produce_no_reports:
+            if report_dir:
+                self.report_dir = os.path.abspath(os.path.expandvars(report_dir))
+                if not os.path.exists(report_dir):
+                    os.mkdir(self.report_dir)
+            else:
+                self.report_dir = os.path.join(get_mjtest_basedir(), "reports")
+                if not os.path.exists(self.report_dir):
+                    os.mkdir(self.report_dir)
+            self.report_dir = os.path.join(self.report_dir, datetime.now().strftime("%d-%m-%y_%H-%M-%S"))
+            os.mkdir(self.report_dir)
         else:
-            self.report_dir = os.path.join(get_mjtest_basedir(), "reports")
-            if not os.path.exists(self.report_dir):
-                os.mkdir(self.report_dir)
-        self.report_dir = os.path.join(self.report_dir, datetime.now().strftime("%d-%m-%y_%H-%M-%S"))
-        os.mkdir(self.report_dir)
+            self.report_dir = None
         logging.basicConfig(level=self.LOG_LEVELS[log_level])
+        self.produce_reports = not produce_no_reports  # type: bool

     def create_tmpfile(self) -> str:
         return os.path.join(self.tmp_dir, str(os.times()))
...
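Distilled into a self-contained sketch (the helper name and `base_dir` argument are illustrative, not project code), the new constructor behaviour is: create a timestamped report directory only when reports are requested, otherwise leave it unset:

```
import os
from datetime import datetime
from typing import Optional

def make_report_dir(base_dir: str, produce_no_reports: bool) -> Optional[str]:
    """Create a timestamped report directory unless reports are disabled."""
    if produce_no_reports:
        return None
    if not os.path.exists(base_dir):
        os.mkdir(base_dir)
    report_dir = os.path.join(base_dir, datetime.now().strftime("%d-%m-%y_%H-%M-%S"))
    os.mkdir(report_dir)
    return report_dir
```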
 from collections import namedtuple
 import shutil
 from typing import Optional, List, Tuple, T, Union, Dict
+import collections
 from mjtest.environment import Environment, TestMode, TEST_MODES
 from os.path import join, exists, basename
 import logging
@@ -25,7 +26,7 @@ class TestSuite:
     def __init__(self, env: Environment):
         self.env = env
         self.test_cases = {}  # type: Dict[str, List[TestCase]]
-        self.correct_test_cases = {}  # type: Dict[str, List[str]]
+        self.correct_test_cases = collections.defaultdict(set)  # type: Dict[str, Set[str]]
         self._load_test_cases()

     def _load_test_cases(self):
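Switching to `collections.defaultdict(set)` means a mode's entry is created on first use and duplicate file names collapse automatically; a quick illustration (the mode and file name are made up):

```
import collections

correct = collections.defaultdict(set)
correct["syntax"].add("valid_program.mj")  # missing key is created on the fly
correct["syntax"].add("valid_program.mj")  # adding twice has no effect
print(dict(correct))  # {'syntax': {'valid_program.mj'}}
```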
@@ -47,7 +48,12 @@ class TestSuite:
             log_file = self._log_file_for_type(mode)
             if exists(log_file):
                 with open(log_file) as f:
-                    correct_test_cases = set(f.readlines())
+                    correct_test_cases = set()
+                    for t in f.readlines():
+                        t = t.strip()
+                        if len(t) > 0:
+                            self.correct_test_cases[mode].add(t)
+                            correct_test_cases.add(t)
             for file in sorted(os.listdir(dir)):
                 if not TestCase.has_valid_file_ending(mode, file):
                     _LOG.debug("Skip file " + file)
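This loop is the actual fix for the `only_incorrect_testcases` bug: `set(f.readlines())` keeps the trailing newlines, so later membership tests against bare file names could never match. A small demonstration (the file name is invented):

```
lines = ["valid_program.mj\n", "\n"]      # what readlines() returns
print("valid_program.mj" in set(lines))   # False: the trailing "\n" defeats the lookup

stripped = {l.strip() for l in lines if l.strip()}
print("valid_program.mj" in stripped)     # True: stripping, as the new loop does
```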
@@ -66,7 +72,7 @@ class TestSuite:
         return join(self.env.test_dir, type, ".mjtest_correct_testcases")

     def _add_correct_test_case(self, test_case: 'TestCase'):
-        self.correct_test_cases[test_case.type].append(basename(test_case.file))
+        self.correct_test_cases[test_case.type].add(basename(test_case.file))

     def run(self) -> RunResult:
         ret = RunResult(0, 0)
@@ -87,10 +93,11 @@ class TestSuite:
                    colored("{} failed.".format(ret.failed), "red", attrs=["bold"]))
         else:
             cprint("All {} run tests succeeded".format(ret.count), "green")
-        report_dir = self.env.report_dir + "." + ("successful" if ret.failed == 0 else "failed")
-        os.rename(self.env.report_dir, report_dir)
-        print("A full report for each test can be found at {}".format(
-            os.path.relpath(report_dir)))
+        if self.env.produce_reports:
+            report_dir = self.env.report_dir + "." + ("successful" if ret.failed == 0 else "failed")
+            os.rename(self.env.report_dir, report_dir)
+            print("A full report for each test can be found at {}".format(
+                os.path.relpath(report_dir)))
         return ret
@@ -109,13 +116,18 @@ class TestSuite:
     def _func(self, test_case: 'TestCase'):
         ret = self._run_test_case(test_case)
         if ret is not False and ret.is_correct():
-            return 0, [test_case]
-        return 1, []
+            return 0, test_case
+        return 1, test_case

     def _run_parallel(self, mode: str, parallel_jobs: int) -> RunResult:
         pool = multiprocessing.Pool(parallel_jobs)
         rets = pool.map(self._func, self.test_cases[mode])
-        return RunResult(len(rets), sum(map(lambda x: x[0], rets)))
+        result = RunResult(len(rets), sum(map(lambda x: x[0], rets)))
+        for (suc, test_case) in rets:
+            if suc == 0:
+                self._add_correct_test_case(test_case)
+        return result

     def _run_test_case(self, test_case: 'TestCase') -> Optional['TestResult']:
         try:
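Previously the parallel path only counted results, so passing test cases were never recorded and `--only_incorrect_tests` had nothing to work with after a parallel run; now the `(status, test_case)` pairs returned by the workers are folded back in the parent process. A stand-alone sketch of that map-then-aggregate pattern (the `check` worker is hypothetical):

```
import multiprocessing

def check(n: int):
    # stand-in for _func: status 0 means the "test" passed
    return (0 if n % 2 == 0 else 1, n)

if __name__ == "__main__":
    with multiprocessing.Pool(2) as pool:
        rets = pool.map(check, range(6))
    failed = sum(status for status, _ in rets)
    passed = [case for status, case in rets if status == 0]
    print(len(rets), failed, passed)  # 6 3 [0, 2, 4]
```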
@@ -126,13 +138,14 @@ class TestSuite:
                                         tc=test_case.name()), color, attrs=["bold"]) +
                    colored("" if ret.is_correct() else ret.short_message(), color))
             try:
-                if not exists(self.env.report_dir):
-                    os.mkdir(self.env.report_dir)
-                rep_dir = join(self.env.report_dir, test_case.type)
-                if not exists(rep_dir):
-                    os.mkdir(rep_dir)
-                suffix = ".correct" if ret.is_correct() else ".incorrect"
-                ret.store_at(join(rep_dir, test_case.short_name() + suffix))
+                if self.env.produce_reports:
+                    if not exists(self.env.report_dir):
+                        os.mkdir(self.env.report_dir)
+                    rep_dir = join(self.env.report_dir, test_case.type)
+                    if not exists(rep_dir):
+                        os.mkdir(rep_dir)
+                    suffix = ".correct" if ret.is_correct() else ".incorrect"
+                    ret.store_at(join(rep_dir, test_case.short_name() + suffix))
                 return ret
             except IOError:
                 _LOG.exception("Caught i/o error while trying to store the report for '{}'"
...