Commit 9254fdd4 authored by Johannes Bechberger's avatar Johannes Bechberger

Initial commit

parents
Copyright (c) 2016 mj3-16 Team (and contributers)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
recursive-include mjtest *.py
README.mdwn
LICENCE
MJTest
======
A test runner (and suite) for the MiniJava compiler (and its parts) written in the compiler lab of the KIT.
It's heavily inspired by Sisyphus (and uses some of its code)
__Please contribute to the test cases__
Test modes
----------
The test cases are divided in 3 'modes':
- __syntax__: Test cases that just check whether `./run --parsecheck` accepts as correct or rejects
them.
- __semantic__: Test cases that check semantic checking of MiniJava programs
- __exec__: Test cases that check the correct compilation of MiniJava programs.
_Only the syntax mode is currently usable, but the other two will follow._
The test cases for each mode are located in a folder with the same name.
The default directory that contains all test folders is `tests`.
The different types of test cases are distinguished by their file endings.
Test types for the syntax mode
------------------------------
<table>
<tr><th>File ending(s) of test cases</th><th>Expected behaviour to complete a test of this type</th></tr>
<tr>
<td><code>.valid.mj</code><code>.mj</code>
<td>Return code is <code>0</code>, i.e. the MiniJava is accepted as syntactically correct</td>
</tr>
<tr>
<td><code>.invalid.mj</code>
<td>Return code is <code>&gt; 0</code> and the error output contains the word <code>error</code></td>
</tr>
</table>
Test runner
-----------
### Requirements
The following programs are required (and executable by simply calling their names).
- `python3` (at least Python3.3)
- `javac` and `java`
### Installation
Just clone `mjtest` and install it via `pip3`.
```sh
git clone https://github.com/mj3-16/mjtest
cd mjtest
sudo pip3 install .
```
### Usage
Output of the `mjtest --help`
```
usage: mjtest [-h] [--tmp_dir] [--test_dir] [--only_incorrect_tests]
[--parallel] [--timeout] [--report_dir] [--log LOG]
{syntax,semantic,exec} MJ_RUN_CMD
MiniJava test runner
positional arguments:
{syntax,semantic,exec}
What do you want to test?
MJ_RUN_CMD Command to run your MiniJava implementation, e.g.
`mj/run`
optional arguments:
-h, --help show this help message and exit
--tmp_dir Used temporary directory
--test_dir Directory that contains all test cases, default is the
'tests' directory
--only_incorrect_tests
Only run the tests that were incorrect the last run
--parallel Run the tests in parallel
--timeout Abort a program after TIMEOUT seconds
--report_dir Directory to store the reports in, default is
'reports'
--log LOG Logging level (error, warn, info or debug)
```
### Example usage
Assuming you want to run the syntax tests and your MiniJava base folder is `~/code/mj` then run
```
mjtest syntax `~/code/mj/run --lextest`
```
This will…
- … create reports in a folder named after the current date and time inside the `reports` folder
- … output something like
```
```
- … log that some test cases were executed correctly
- … return with an error code of `0` if all tests executed correctly
Contributions
-------------
__Please contribute to this test runner and the accompanied test cases.__
To add test cases just open a pull request. The test cases must have unique names (in each mode folder).
Licence
-------
MIT, see LICENCE file for more information.
# Version string of the mjtest package.
VERSION = "0.42"
\ No newline at end of file
import logging
from pprint import pprint
import sys
import mjtest.util.utils
import argparse
from mjtest.environment import TestMode, Environment, TEST_MODES
from mjtest.test.tests import TestSuite
# adapted from http://stackoverflow.com/a/8527629
class LogLevelChoices(argparse.Action):
    """argparse action that validates a logging-level option value.

    Accepts only the levels listed in :attr:`CHOICES` and raises an
    ``argparse.ArgumentError`` (which argparse turns into a usage error)
    for anything else.
    """

    #: the accepted logging levels, in the order shown to the user
    CHOICES = ["error", "warn", "info", "debug"]

    def __call__(self, parser, namespace, values, option_string=None):
        if values:
            # BUG FIX: for a plain option (no nargs) argparse passes `values`
            # as a single string; the original iterated over it and therefore
            # checked individual *characters* against CHOICES, rejecting every
            # legal level. Normalize to a list so whole words are validated.
            items = values if isinstance(values, list) else [values]
            for value in items:
                if value not in self.CHOICES:
                    message = ("invalid choice: {0!r} (choose from {1})"
                               .format(value,
                                       ', '.join(repr(choice)
                                                 for choice in self.CHOICES)))
                    raise argparse.ArgumentError(self, message)
        setattr(namespace, self.dest, values)
if True:  # NOTE(review): originally `if __name__ == '__main__':`, deliberately commented out — confirm before restoring
    parser = argparse.ArgumentParser(description="MiniJava test runner", add_help=True)
    parser.add_argument("mode",
                        choices=TEST_MODES,
                        help="What do you want to test?")
    parser.add_argument("mj_run_cmd",
                        metavar="MJ_RUN_CMD",
                        help="Command to run your MiniJava implementation, e.g. `mj/run`")
    # BUG FIX: the value-taking options below used action="store_const", which
    # made them argument-less flags that stored their own option name (e.g.
    # `--timeout` set timeout to the string "timeout" and `--timeout 30` was a
    # parse error). They now accept a value, as the help text promises.
    parser.add_argument("--tmp_dir", default="", metavar="DIR",
                        help="Used temporary directory")
    parser.add_argument("--test_dir", default="", metavar="DIR",
                        help="Directory that contains all test cases, default is the 'tests' directory")
    parser.add_argument("--only_incorrect_tests", action="store_true", default=False,
                        help="Only run the tests that were incorrect the last run")
    parser.add_argument("--parallel", action="store_true", default=False,
                        help="Run the tests in parallel")
    parser.add_argument("--timeout", type=int, default=30, metavar="SECONDS",
                        help="Abort a program after TIMEOUT seconds")
    parser.add_argument("--report_dir", default="", metavar="DIR",
                        help="Directory to store the reports in, default is 'reports'")
    parser.add_argument("--log_level", action=LogLevelChoices, default="warn",
                        help="Logging level (error, warn, info or debug)")
    args = parser.parse_args()
    suite = TestSuite(Environment(**vars(args)))
    ret = None
    try:
        ret = suite.run()
    finally:
        # always clean up temporary files and persist the report,
        # even when the run aborts with an exception
        suite.env.clean_up()
        suite.store()
    # a missing (aborted) or truthy result counts as failure
    if ret is None or ret:
        sys.exit(1)
    else:
        sys.exit(0)
import logging
import os
import shutil
import tempfile
from datetime import datetime, time
from mjtest.util.shell import execute
from mjtest.util.utils import get_mjtest_basedir
import humanfriendly as hf
from typing import Tuple, List
class TestMode:
    # The three test categories; the values double as sub-directory names
    # inside the test directory.
    syntax = "syntax"
    semantic = "semantic"
    exec = "exec"


""" All 'success' tests of the n-th mode can be used as 'success' tests for the (n-1)-th mode"""
# Ordered list of all modes, from least to most demanding.
TEST_MODES = [TestMode.syntax, TestMode.semantic, TestMode.exec]
class Environment:
    """Configuration and working directories for one test-runner invocation.

    Owns the temporary directory, the test-case directory and the
    per-run report directory, and knows how to invoke the MiniJava
    `run` script under test.
    """

    #: maps command-line log-level names to `logging` constants
    LOG_LEVELS = {
        "info": logging.INFO,
        "error": logging.ERROR,
        "warn": logging.WARN,
        "debug": logging.DEBUG
    }

    def __init__(self, mode, mj_run_cmd: str, tmp_dir: str = "", test_dir: str = "",
                 only_incorrect_tests: bool = False, parallel: bool = False,
                 timeout: int = 30, report_dir: str = "", log_level: str = "warn"):
        """Create the environment, creating any missing directories.

        :param mode: test mode (one of TEST_MODES)
        :param mj_run_cmd: path of the MiniJava `run` script
        :param tmp_dir: user-supplied temporary directory ("" = auto-created)
        :param test_dir: directory containing the test folders ("" = bundled `tests`)
        :param only_incorrect_tests: rerun only previously failing tests
        :param parallel: run test cases in parallel
        :param timeout: per-program timeout in seconds
        :param report_dir: base directory for reports ("" = bundled `reports`)
        :param log_level: one of the LOG_LEVELS keys
        """
        self.mode = mode
        self.mj_run_cmd = os.path.realpath(mj_run_cmd)
        if tmp_dir:
            # the user supplied his own tmp dir: keep it on clean_up()
            self.own_tmp_dir = True
            self.tmp_dir = os.path.abspath(os.path.expandvars(tmp_dir))
            # BUG FIX: check the normalized path (the original tested the raw
            # argument, so an expandvars'd path could be mkdir'ed twice or missed)
            if not os.path.exists(self.tmp_dir):
                os.mkdir(self.tmp_dir)
        else:
            self.own_tmp_dir = False
            self.tmp_dir = tempfile.mkdtemp("mjtest")
        if test_dir:
            self.test_dir = os.path.abspath(os.path.realpath(test_dir))
        else:
            self.test_dir = os.path.join(get_mjtest_basedir(), "tests")
        if not os.path.exists(self.test_dir):
            # bootstrap an empty test directory with one folder per mode
            os.mkdir(self.test_dir)
            for d in [TestMode.syntax, TestMode.semantic, TestMode.exec]:
                os.mkdir(os.path.join(self.test_dir, d))
        self.only_incorrect_tests = only_incorrect_tests
        self.parallel = parallel
        self.timeout = timeout
        # BUG FIX: the original tested `tmp_dir` here (copy-paste error), so a
        # user-supplied report_dir was ignored unless tmp_dir was also given.
        if report_dir:
            self.report_dir = os.path.abspath(os.path.expandvars(report_dir))
            if not os.path.exists(self.report_dir):
                os.mkdir(self.report_dir)
        else:
            self.report_dir = os.path.join(get_mjtest_basedir(), "reports")
            if not os.path.exists(self.report_dir):
                os.mkdir(self.report_dir)
        # each run gets its own time-stamped sub-directory
        self.report_dir = os.path.join(self.report_dir, datetime.now().strftime("%d.%m.%y:%X"))
        os.mkdir(self.report_dir)
        logging.basicConfig(level=self.LOG_LEVELS[log_level])

    def create_tmpfile(self) -> str:
        """Create a fresh temporary file in the tmp dir and return its path.

        BUG FIX: the original joined ``os.times()`` (a struct, not a string)
        onto the path, which raised a TypeError on every call.
        """
        fd, path = tempfile.mkstemp(dir=self.tmp_dir)
        os.close(fd)
        return path

    def clean_up(self):
        """Remove the temporary directory unless the user supplied his own."""
        if not self.own_tmp_dir:
            shutil.rmtree(self.tmp_dir)

    def run_mj_command(self, *args: str) -> Tuple[bytes, bytes, int]:
        """
        Execute the MiniJava `run` script with the given arguments.
        :param args: arguments for the MiniJava `run` script
        :return: (out, err, return code)
        """
        cmd = [self.mj_run_cmd] + list(args)
        return execute(cmd, timeout=self.timeout)
\ No newline at end of file
__author__ = 'parttimenerd'
from mjtest.environment import Environment, TestMode
from mjtest.test.tests import TestCase, TestResult, BasicTestResult
from os import path
class BasicSyntaxTest(TestCase):
    """Syntax-mode test case: feeds a MiniJava file to `run --parsetest`.

    Files ending in ``.invalid.mj`` are expected to be rejected; all other
    endings are expected to be accepted (return code 0).
    """

    #: recognised file endings; `.invalid.mj` marks programs that must be rejected
    FILE_ENDINGS = [".invalid.mj", ".valid.mj", ".mj"]

    def __init__(self, env: Environment, type: str, file: str):
        super().__init__(env, type, file)
        self._should_succeed = not file.endswith(".invalid.mj")

    def should_succeed(self) -> bool:
        """Return True if the compiler is expected to accept this file."""
        return self._should_succeed

    def short_name(self) -> str:
        """File name without the trailing ".mj"."""
        return path.basename(self.file)[:-3]

    def run(self) -> TestResult:
        # BUG FIX: the original passed "--parsetest " with a trailing space,
        # handing the compiler an argument it would not recognise.
        out, err, rtcode = self.env.run_mj_command("--parsetest", self.file)
        return BasicTestResult(self, rtcode, out.decode(), err.decode())


# register this class as the test-case implementation for the syntax mode
TestCase.TEST_CASE_CLASSES[TestMode.syntax].append(BasicSyntaxTest)
This diff is collapsed.
# Declare this directory a namespace package so the bundled futures backport
# can coexist with other distributions of the same package.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
from base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
from process import ProcessPoolExecutor
from thread import ThreadPoolExecutor
This diff is collapsed.
# source: https://github.com/libfirm/sisyphus
from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)
    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    # BUG FIX: `basestring` is Python 2 only and raised NameError under
    # Python 3; `str` is the correct check here.
    if isinstance(field_names, str):
        field_names = field_names.replace(',', ' ').split()   # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c == '_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1]   # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError:
        e = _sys.exc_info()[1]
        # BUG FIX: `e.message` is Python 2 only; use str(e) instead.
        raise SyntaxError(str(e) + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')

    return result
This diff is collapsed.
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
from __future__ import with_statement
import atexit
import threading
import weakref
import sys
import base
try:
import queue
except ImportError:
import Queue as queue
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpretor shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
# Weak references to every worker thread spawned by any ThreadPoolExecutor;
# pruned by _remove_dead_thread_references().
_thread_references = set()
# Flipped to True at interpreter exit (see module comment above) so idle
# workers stop polling their queues and terminate.
_shutdown = False
def _remove_dead_thread_references():
    """Remove inactive threads from _thread_references.

    Should be called periodically to prevent memory leaks in scenarios such as:
    >>> while True:
    ...    t = ThreadPoolExecutor(max_workers=5)
    ...    t.map(int, ['1', '2', '3', '4', '5'])
    """
    # Collect the stale weakrefs first, then drop them from the live set.
    dead_refs = [ref for ref in _thread_references if ref() is None]
    for ref in dead_refs:
        _thread_references.discard(ref)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
id = "%s(%s,%s)" % (self.fn, self.args, self.kwargs)
base.LOGGER.debug('run WorkItem: '+id)
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
base.LOGGER.debug('WorkItem(%s) finished with exception: %s' % (id, e))
else:
self.future.set_result(result)
base.LOGGER.debug('WorkItem(%s) finished with result: %s' % (id, result))
def _worker(executor_reference, work_queue):
    """Thread main loop: pull work items off *work_queue* and run them.

    The worker holds only a weak reference to its executor so that the
    executor can be garbage-collected while workers are idle; the short
    queue timeout lets the worker periodically re-check the exit conditions.
    """
    try:
        while True:
            try:
                work_item = work_queue.get(block=True, timeout=0.1)
            except queue.Empty:
                executor = executor_reference()
                # Exit if:
                #   - The interpreter is shutting down OR
                #   - The executor that owns the worker has been collected OR
                #   - The executor that owns the worker has been shutdown.
                if _shutdown or executor is None or executor._shutdown:
                    return
                # drop the strong reference again so the executor stays collectable
                del executor
            else:
                work_item.run()
    except BaseException:
        # last-resort guard: a worker must never die silently
        base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(base.Executor):
    """Executor backport that runs submitted callables on daemon threads."""

    def __init__(self, max_workers):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
        """
        # prune weakrefs to threads from earlier executors (see module comment)
        _remove_dead_thread_references()

        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()
        base.LOGGER.debug('ThreadPoolExecutor with %d workers max' % max_workers)

    def submit(self, fn, *args, **kwargs):
        # Enqueue fn(*args, **kwargs) and return a Future for its result.
        with self._shutdown_lock:
            if self._shutdown:
                msg = 'cannot schedule new futures after shutdown'
                base.LOGGER.error(msg)
                raise RuntimeError(msg)
            f = base.Future()
            w = _WorkItem(f, fn, args, kwargs)
            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # Spawn one more worker thread unless the pool is already at max size.
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        if len(self._threads) < self._max_workers:
            t = threading.Thread(target=_worker,
                                 args=(weakref.ref(self), self._work_queue))
            # daemon threads so a forgotten shutdown() cannot block interpreter exit
            t.daemon = True
            t.start()
            self._threads.add(t)
            _thread_references.add(weakref.ref(t))
            base.LOGGER.info('ThreadPoolExecutor spawned a new thread, now at %d' % len(self._threads))

    def shutdown(self, wait=True):
        # Flag the pool as shut down; idle workers notice on their next queue poll.
        with self._shutdown_lock:
            self._shutdown = True
        if wait:
            base.LOGGER.debug('ThreadPoolExecutor shutting down, waiting for threads')
            for t in self._threads:
                t.join()
        base.LOGGER.info('ThreadPoolExecutor shut down')
    shutdown.__doc__ = base.Executor.shutdown.__doc__
# source: https://github.com/libfirm/sisyphus
import os
import re
import subprocess
def available_cpu_count():
    """Number of available virtual or physical CPUs on this system, i.e.
    user/real as output by time(1) when called with an optimally scaling
    userspace-only program.

    Tries a series of increasingly platform-specific probes and returns the
    first positive answer; raises if every probe fails.
    """
    # cpuset (Linux): a cpuset may restrict the number of *available*
    # processors below the number physically present, so check it first.
    try:
        status = open('/proc/self/status').read()
        match = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status)
        if match:
            mask = int(match.group(1).replace(',', ''), 16)
            count = bin(mask).count('1')
            if count > 0:
                return count
    except IOError:
        pass

    # Python 2.6+ standard library
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # http://code.google.com/p/psutil/
    try:
        import psutil
        return psutil.cpu_count()  # psutil.NUM_CPUS on old versions
    except (ImportError, AttributeError):
        pass

    # POSIX sysconf
    try:
        count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if count > 0:
            return count
    except (AttributeError, ValueError):
        pass

    # Windows environment variable
    try:
        count = int(os.environ['NUMBER_OF_PROCESSORS'])
        if count > 0:
            return count
    except (KeyError, ValueError):
        pass

    # Jython: ask the JVM
    try:
        from java.lang import Runtime
        count = Runtime.getRuntime().availableProcessors()
        if count > 0:
            return count
    except ImportError:
        pass

    # BSD sysctl
    try:
        sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],
                                  stdout=subprocess.PIPE)
        count = int(sysctl.communicate()[0])
        if count > 0:
            return count
    except (OSError, ValueError):
        pass

    # Linux /proc/cpuinfo
    try:
        count = open('/proc/cpuinfo').read().count('processor\t:')
        if count > 0:
            return count
    except IOError:
        pass

    # Solaris pseudo devices
    try:
        count = sum(1 for dev in os.listdir('/devices/pseudo/')
                    if re.match(r'^cpuid@[0-9]+$', dev))
        if count > 0:
            return count
    except OSError:
        pass

    # Other UNIXes: count "cpuN:" entries in the boot log (heuristic)
    try:
        try:
            dmesg = open('/var/run/dmesg.boot').read()
        except IOError:
            dmesg = subprocess.Popen(['dmesg'],
                                     stdout=subprocess.PIPE).communicate()[0]
        count = 0
        while '\ncpu' + str(count) + ':' in dmesg:
            count += 1
        if count > 0:
            return count
    except OSError:
        pass

    raise Exception('Can not determine number of CPUs on this system')
# source: https://github.com/libfirm/sisyphus
"""
Convenience function
Alternative to subprocess and os.system
"""
import subprocess
import resource
import sys
import signal
import threading
import logging
# Logger shared by the shell helpers in this module.
_LOG = logging.getLogger("sisyphus")
# Map negative return codes (as subprocess reports signal deaths) to the
# corresponding signal name, e.g. -9 -> 'SIGKILL'.
_EXIT_CODES = dict((-k, v) for v, k in signal.__dict__.items() if v.startswith('SIG'))
# Key 0 is a normal exit, not a signal death (it comes from entries whose
# value is 0, e.g. SIG_DFL), so drop it.
del _EXIT_CODES[0]
class SigKill(Exception):
    """Raised when a child process was terminated by a signal.

    :ivar retcode: the (negative) return code of the killed process
    :ivar name: the name of the killing signal, e.g. 'SIGKILL'
    """

    def __init__(self, retcode, name):
        # Pass the signal name to Exception so str(exc) is meaningful
        # (the original never called the base constructor, so str(exc) was '').
        super().__init__(name)
        self.retcode = retcode
        self.name = name
def _lower_rlimit(res, limit):
(soft, hard) = resource.getrlimit(res)
if soft > limit or soft == resource.RLIM_INFINITY:
soft = limit
if hard > limit or hard == resource.RLIM_INFINITY:
hard = limit
resource.setrlimit(res, (soft, hard))
class _Execute(object):
def __init__(self, cmd, timeout, env, rlimit):