style: pep8 style for python files

Gerwin Klein 2020-03-18 17:25:55 +08:00 committed by Gerwin Klein
parent 046a1358f6
commit 75acf19dcd
20 changed files with 500 additions and 355 deletions

View File

@ -11,11 +11,12 @@ import optparse
import re
import sys
def main():
# Parse arguments.
parser = optparse.OptionParser(usage="usage: %prog [options] <thy file>")
parser.add_option("-v", "--verbose", dest="verbose",
help="Show Isabelle output", action="store_true", default=False)
help="Show Isabelle output", action="store_true", default=False)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Expected a single argument containing the Isabelle theory file to run.")
@ -27,7 +28,7 @@ def main():
# Run Isabelle.
process = pexpect.spawn('isabelle-process',
['-r', '-q', '-f', '-e', 'use_thy \"%s\";' % filename, "Benchmark"], timeout=None)
['-r', '-q', '-f', '-e', 'use_thy \"%s\";' % filename, "Benchmark"], timeout=None)
if not options.verbose:
process.logfile = None
else:
@ -47,7 +48,7 @@ def main():
re.compile('^category:: (.*)$', re.MULTILINE),
re.compile('^\*\*\* (.*)$', re.MULTILINE),
pexpect.EOF
])
])
if result == 0:
# Benchmark result.
print(" " + process.match.group(1).strip("\r\n"))
@ -74,5 +75,6 @@ def main():
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main()
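
The hunks above only re-indent the continuation lines of parser.add_option(...) and pexpect.spawn(...) so they line up inside the opening parenthesis, which is the PEP 8 continuation-indent convention (E127/E128) applied throughout this commit. A minimal, self-contained sketch of that style, separate from the commit's files and using the same option as the script above:

import optparse

# Continuation lines align with the opening delimiter (PEP 8, E127/E128).
parser = optparse.OptionParser(usage="usage: %prog [options] <thy file>")
parser.add_option("-v", "--verbose", dest="verbose",
                  help="Show Isabelle output", action="store_true", default=False)
(options, args) = parser.parse_args([])
print(options.verbose)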

View File

@ -5,18 +5,21 @@
# SPDX-License-Identifier: BSD-2-Clause
#
import argparse, os, sys
import isasymbols
import argparse
import os
import sys
MY_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(MY_DIR, '../../../internal/misc/pysymbols'))
import isasymbols
def main(argv):
parser = argparse.ArgumentParser(
description='generate unicode-ascii translation tables')
parser.add_argument('--output', '-o', type=argparse.FileType('w'),
default=sys.stdout, help='output file')
default=sys.stdout, help='output file')
options = parser.parse_args(argv[1:])
with open(os.path.join(MY_DIR, '../../../isabelle/etc/symbols')) as f:
@ -26,22 +29,23 @@ def main(argv):
options.output.write('static map<wstring, wchar_t> ascii_to_unicode = {\n')
for sym in t.symbols:
options.output.write(' { L"%s", L\'\\x%x\' },\n' %
(sym.ascii_text.replace('\\', '\\\\'), sym.code_point))
(sym.ascii_text.replace('\\', '\\\\'), sym.code_point))
options.output.write('};\n\n')
# Write a unicode-to-ASCII table.
options.output.write('static map<wchar_t, wstring> unicode_to_ascii = {\n')
for s in t.symbols:
options.output.write(' { L\'\\x%x\', L"%s" },\n' %
(s.code_point, s.ascii_text.replace('\\', '\\\\')))
(s.code_point, s.ascii_text.replace('\\', '\\\\')))
options.output.write('};\n\n')
# Work out the maximum length of an ASCII sequence.
ascii_seq_max = max(len(s.ascii_text) for s in t.symbols)
options.output.write('static const unsigned int ASCII_SEQ_MAX = %u;\n\n' %
ascii_seq_max)
ascii_seq_max)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
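
The hunk above splits "import argparse, os, sys" into one import per statement (E401) and keeps "import isasymbols" after the sys.path.append(...) that makes it importable. A small runnable sketch of the same pattern; the 'pysymbols' path below is only a stand-in and nothing is actually imported from it:

# One import per line (PEP 8, E401).
import argparse
import os
import sys

MY_DIR = os.path.dirname(os.path.abspath(__file__))
# Extend the search path before importing anything that lives there
# (this sketch only prints the path rather than importing a real package).
sys.path.append(os.path.join(MY_DIR, 'pysymbols'))

parser = argparse.ArgumentParser(description='import-style sketch')
print(parser.description, sys.path[-1])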

View File

@ -13,15 +13,16 @@
# Based on code by Timothy Bourke, NICTA
#
import isasymbols
from __future__ import print_function
import os, sys
import os
import sys
# Make isasymbols importable.
sys.path.append(os.path.join(os.path.dirname(__file__), '../pysymbols'))
import isasymbols
translator = isasymbols.make_translator(os.path.join(os.path.dirname(__file__),
'../../isabelle/etc/symbols'))
'../../isabelle/etc/symbols'))
if len(sys.argv) > 1:
f = open(sys.argv[1], 'r')

View File

@ -12,15 +12,16 @@
# Based on code by Timothy Bourke, NICTA
#
import isasymbols
from __future__ import print_function
import os, sys
import os
import sys
# Make isasymbols importable.
sys.path.append(os.path.join(os.path.dirname(__file__), '../pysymbols'))
import isasymbols
translator = isasymbols.make_translator(os.path.join(os.path.dirname(__file__),
'../../isabelle/etc/symbols'))
'../../isabelle/etc/symbols'))
if len(sys.argv) > 1:
f = open(sys.argv[1], 'r')

View File

@ -49,14 +49,17 @@ MAX_EMAILS_PER_RUN = 10
# Footer at the bottom of emails
BODY_FOOTER = ["", "-- ", "Sent with ❤ by 'commit-email.py'."]
def as_utf8(s):
"""Interpret the given byte string as utf-8."""
assert isinstance(s, str)
return s.decode('utf-8', 'replace')
def is_unicode(s):
return isinstance(s, unicode)
def is_ascii(s):
assert is_unicode(s)
try:
@ -66,20 +69,26 @@ def is_ascii(s):
else:
return True
def encode_unicode_header(s):
if is_ascii(s):
return s
return email.Header.make_header([(s, "utf-8")]).encode()
VERBOSE = False
def debug(x):
if VERBOSE:
sys.stderr.write(x + "\n")
def get_commit_patch(repo, hexsha):
patch = repo.git.show(hexsha, patience=True, pretty="format:", stat=True, patch=True)
return as_utf8(patch)
def get_commit_branches(repo, remote, hexsha):
commit_branches = set()
for ref in remote.refs:
@ -91,6 +100,7 @@ def get_commit_branches(repo, remote, hexsha):
pass
return sorted([as_utf8(x) for x in commit_branches])
def first_line(s, max_len=256):
"""Summarise the message 's'."""
assert is_unicode(s)
@ -100,6 +110,7 @@ def first_line(s, max_len=256):
s = s[:max_len - 3] + "..."
return s
def send_email(from_addr, dest_addrs, headers, body, dry_run=False):
# Ensure we only have unicode inputs, and that email addresses, header
# names are in the ASCII subset. If only we had a type system...
@ -168,8 +179,8 @@ def email_commit(from_addr, dest_addrs, repo, remote, commit, repo_name, dry_run
"commit: %s" % (as_utf8(commit.hexsha[:12])),
"author: %s <%s>" % (commit.author.name, as_utf8(commit.author.email)),
"date: %s" % (
datetime.datetime.fromtimestamp(commit.authored_date)
.strftime('%A, %-d %B %Y @ %H:%M')),
datetime.datetime.fromtimestamp(commit.authored_date)
.strftime('%A, %-d %B %Y @ %H:%M')),
"branch: %s" % (", ".join(branches)),
]
+ [""]
@ -181,19 +192,20 @@ def email_commit(from_addr, dest_addrs, repo, remote, commit, repo_name, dry_run
# Construct email
send_email(
from_addr=from_addr,
dest_addrs=dest_addrs,
headers={
"Reply-To": "%s <%s>" % (
encode_unicode_header(commit.author.name),
encode_unicode_header(as_utf8(commit.author.email))),
"From": "%s <%s>" % (
encode_unicode_header(commit.author.name), from_addr),
"Subject": encode_unicode_header(subject),
},
body="\n".join(body) + "\n",
dry_run=dry_run
)
from_addr=from_addr,
dest_addrs=dest_addrs,
headers={
"Reply-To": "%s <%s>" % (
encode_unicode_header(commit.author.name),
encode_unicode_header(as_utf8(commit.author.email))),
"From": "%s <%s>" % (
encode_unicode_header(commit.author.name), from_addr),
"Subject": encode_unicode_header(subject),
},
body="\n".join(body) + "\n",
dry_run=dry_run
)
def email_bulk_commit(from_addr, dest_addrs, repo, commits, repo_name, dry_run=False):
# Check inputs.
@ -224,41 +236,43 @@ def email_bulk_commit(from_addr, dest_addrs, repo, commits, repo_name, dry_run=F
# Construct email
send_email(
from_addr=from_addr,
dest_addrs=dest_addrs,
headers={
"From": "%s <%s>" % (
encode_unicode_header(author), from_addr),
"Reply-To": "%s <%s>" % (
encode_unicode_header(author),
encode_unicode_header(message_from_address)),
"Subject": encode_unicode_header(subject),
},
body="\n".join(body) + "\n",
dry_run=dry_run
)
from_addr=from_addr,
dest_addrs=dest_addrs,
headers={
"From": "%s <%s>" % (
encode_unicode_header(author), from_addr),
"Reply-To": "%s <%s>" % (
encode_unicode_header(author),
encode_unicode_header(message_from_address)),
"Subject": encode_unicode_header(subject),
},
body="\n".join(body) + "\n",
dry_run=dry_run
)
def main():
# Parse arguments.
parser = argparse.ArgumentParser(
description="Email new commits in a git repository.")
description="Email new commits in a git repository.")
parser.add_argument('repo', help="git repository location", metavar='REPO')
parser.add_argument('--remote', '-r',
help="remote to pull from (default 'origin')", default="origin", type=unicode)
help="remote to pull from (default 'origin')", default="origin", type=unicode)
parser.add_argument('--verbose', '-v', action="store_true",
help="be verbose")
help="be verbose")
parser.add_argument('--mark-only', action="store_true",
help="mark commits as emailed, but don't actually send off an email")
help="mark commits as emailed, but don't actually send off an email")
parser.add_argument('--dry-run', '-n', action="store_true",
help="don't do a 'git' fetch, and print emails to standard out")
help="don't do a 'git' fetch, and print emails to standard out")
parser.add_argument('--no-fetch', action="store_true",
help="don't do a 'git fetch'.")
help="don't do a 'git fetch'.")
parser.add_argument('--repo-name', help="email subject prefix", type=unicode)
parser.add_argument('--to', '-d', help="email address to send to", dest="to_addr", type=unicode)
parser.add_argument('--from', '-f', help="email address to send from", dest="from_addr", type=unicode)
parser.add_argument('--from', '-f', help="email address to send from",
dest="from_addr", type=unicode)
parser.add_argument('--max-emails', '-M', action="store",
help="maximum commit emails before we just send a single email summarising the changes",
dest="max_emails", default=MAX_EMAILS_PER_RUN)
help="maximum commit emails before we just send a single email summarising the changes",
dest="max_emails", default=MAX_EMAILS_PER_RUN)
args = parser.parse_args()
# Setup verbose debugging if necessary.
@ -316,7 +330,7 @@ def main():
if not args.mark_only:
debug("Sending bulk email with %d commits..." % len(new_commits))
email_bulk_commit(args.from_addr, [args.to_addr], repo, new_commits,
repo_name=args.repo_name, dry_run=args.dry_run)
repo_name=args.repo_name, dry_run=args.dry_run)
if not args.dry_run:
for commit in new_commits:
db[commit.hexsha] = True
@ -327,7 +341,7 @@ def main():
if not args.mark_only:
debug("Emailing commit %s to %s..." % (commit.hexsha, args.to_addr))
email_commit(args.from_addr, [args.to_addr], repo, remote, commit,
repo_name=args.repo_name, dry_run=args.dry_run)
repo_name=args.repo_name, dry_run=args.dry_run)
if not args.dry_run:
db[commit.hexsha] = True
db.sync()
@ -335,5 +349,6 @@ def main():
# Close the database.
db.close()
if __name__ == "__main__":
main()

View File

@ -6,5 +6,6 @@
# SPDX-License-Identifier: BSD-2-Clause
#
class IsaSymbolsException(Exception):
pass
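
Blank lines are not preserved in this rendering, so one-line additions like the hunk above (5 lines becoming 6) are most plausibly the extra blank lines PEP 8 asks for around top-level definitions (E302/E305: two blank lines before and after a top-level class or def). A hypothetical sketch of that spacing:

"""Sketch of PEP 8 blank-line rules (E302/E305)."""


class IsaSymbolsException(Exception):
    pass


def raise_it(message):
    raise IsaSymbolsException(message)


print(IsaSymbolsException.__name__)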

View File

@ -6,18 +6,23 @@
# SPDX-License-Identifier: BSD-2-Clause
#
import codecs, collections, numbers, re, types
import codecs
import collections
import numbers
import re
import types
from .exception import IsaSymbolsException
class Symbol(object):
def __init__(self, ascii_text, code_point, group=None, font=None,
abbreviations=None):
abbreviations=None):
assert isinstance(ascii_text, basestring)
assert isinstance(code_point, numbers.Number)
assert isinstance(group, (basestring, types.NoneType))
assert isinstance(font, (basestring, types.NoneType))
assert isinstance(abbreviations, (collections.Iterable,
types.NoneType))
types.NoneType))
self.ascii_text = ascii_text
self.code_point = code_point
@ -25,11 +30,13 @@ class Symbol(object):
self.font = font
self.abbreviations = abbreviations or []
def _extract_property(prop):
# Values of the symbol fields can contain '␣' which is intended to
# indicate a space.
return prop.replace(u'␣', ' ')
class Translator(object):
def __init__(self, symbols_text):
assert isinstance(symbols_text, basestring)
@ -37,7 +44,7 @@ class Translator(object):
self.symbols = []
for number, line in enumerate(x.strip() for x in
symbols_text.split('\n')):
symbols_text.split('\n')):
if line.startswith('#'):
# Comment
@ -50,9 +57,9 @@ class Translator(object):
items = line.split()
if len(items) < 3 or len(items) % 2 == 0:
raise IsaSymbolsException('%d: unexpected line format' %
number)
number)
fields = {'ascii_text':items[0]}
fields = {'ascii_text': items[0]}
for k, v in zip(*[iter(items[1:])] * 2):
@ -61,7 +68,7 @@ class Translator(object):
code = int(_extract_property(v), 16)
except ValueError:
raise IsaSymbolsException('%d: invalid code field' %
number)
number)
fields['code_point'] = code
elif k == 'group:':
@ -77,14 +84,14 @@ class Translator(object):
else:
raise IsaSymbolsException('%d: unexpected field %s' %
(number, k))
(number, k))
self.symbols.append(Symbol(**fields))
# Translation dictionaries that we'll lazily initialise later.
self._utf8_to_ascii = None
self._utf8_to_tex = None
self._hshifts_tex = None # Handling for sub-/super-scripts
self._hshifts_tex = None # Handling for sub-/super-scripts
def encode(self, data):
for symbol in self.symbols:
@ -98,7 +105,7 @@ class Translator(object):
if self._utf8_to_ascii is None:
# First time this function has been called; init the dictionary.
self._utf8_to_ascii = {unichr(s.code_point): s.ascii_text
for s in self.symbols}
for s in self.symbols}
return ''.join(self._utf8_to_ascii.get(c, c) for c in data)
@ -134,8 +141,9 @@ class Translator(object):
)
return ''.join(self._utf8_to_tex.get(c, c) for c in
reduce(lambda a, (regex, rep): regex.sub(rep, a), self._hshifts_tex,
data))
reduce(lambda a, (regex, rep): regex.sub(rep, a), self._hshifts_tex,
data))
def make_translator(symbols_file):
assert isinstance(symbols_file, basestring)
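
Apart from re-indented continuation lines, the hunks above add the space PEP 8 wants after ':' in dict literals (E231), turning {'ascii_text':items[0]} into {'ascii_text': items[0]}, and put two spaces before the inline comment on the _hshifts_tex line. A short sketch of the dict spacing; the items list is a made-up stand-in for a parsed symbols-file line:

# One space after ":" inside dict literals (PEP 8, E231).
items = ['\\<alpha>', 'code:', '03b1']
fields = {'ascii_text': items[0]}  # previously written {'ascii_text':items[0]}
fields['code_point'] = int(items[2], 16)
print(fields)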

View File

@ -22,6 +22,7 @@ Example usage:
import StringIO
from .exception import IsaSymbolsException
class Proof(object):
def __init__(self, statement, name=None, debug=True):
self.statement = statement
@ -34,10 +35,10 @@ class Proof(object):
def apply(self, step, solves=0):
if self.completed:
raise IsaSymbolsException('you cannot apply steps to a completed '
'proof')
'proof')
if solves > self.subgoals:
raise IsaSymbolsException('you cannot solve more subgoals (%d) '
'than remain (%d)' % (solves, self.subgoals))
'than remain (%d)' % (solves, self.subgoals))
if ' ' in step:
step = '(%s)' % step
self.steps.append((' ' * (self.subgoals - 1), step))

View File

@ -50,14 +50,18 @@ except ImportError:
# for the total user and system times of its child processes.
# For compatibility with both versions, we ignore the additional
# values.
def cpu_time_of(process):
cpu_times = process.cpu_times()
return cpu_times[0] + cpu_times[1]
class Poller(threading.Thread):
'''Subclass of threading.Thread that monitors CPU usage of another process.
Use run() to start the process.
Use cpu_usage() to retrieve the latest estimate of CPU usage.'''
def __init__(self, pid):
super(Poller, self).__init__()
# Daemonise ourselves to avoid delaying exit of the process of our
@ -73,7 +77,7 @@ class Poller(threading.Thread):
# Remember CPU times of recently seen children.
# This is to prevent double-counting for child processes.
self.current_children = {} # {(pid, create_time): CPU time}
self.current_children = {} # {(pid, create_time): CPU time}
# CPU time of dead children is recorded here.
self.old_children_cpu = 0.0
@ -154,6 +158,7 @@ class Poller(threading.Thread):
def __exit__(self, *_):
self.finished = True
def process_poller(pid):
'''Initiate polling of a subprocess. This is intended to be used in a
`with` block.'''
@ -166,9 +171,10 @@ def process_poller(pid):
return p
def main():
if len(sys.argv) <= 1 or sys.argv[1] in ['-?', '--help']:
print('Usage: %s command args...\n Measure total CPU ' \
print('Usage: %s command args...\n Measure total CPU '
'usage of a command' % sys.argv[0], file=sys.stderr)
return -1
@ -194,5 +200,6 @@ def main():
return p.returncode
if __name__ == '__main__':
sys.exit(main())
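
The one-line changes in the hunks above are purely comment spacing: PEP 8 wants at least two spaces before an inline '#' and one space after it (E261/E262), as in the current_children line. A tiny sketch of that rule:

# Inline comments: two spaces before "#", one space after (E261/E262).
current_children = {}  # {(pid, create_time): CPU time}
old_children_cpu = 0.0  # accumulated CPU time of children that have exited
print(current_children, old_children_cpu)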

View File

@ -11,9 +11,12 @@ to the UNIX `time` utility.
'''
from __future__ import print_function
import subprocess, sys, threading, time
import subprocess
import sys
import threading
import time
PSUTIL_NOT_AVAILABLE=False
PSUTIL_NOT_AVAILABLE = False
try:
import psutil
if not hasattr(psutil.Process, "children") and hasattr(psutil.Process, "get_children"):
@ -22,48 +25,50 @@ try:
psutil.Process.memory_maps = psutil.Process.get_memory_maps
except ImportError:
PSUTIL_NOT_AVAILABLE=True
PSUTIL_NOT_AVAILABLE = True
if PSUTIL_NOT_AVAILABLE:
def get_usage(proc):
return 0
def get_total_usage(proc):
return 0
else:
def get_usage(proc):
'''Retrieve the memory usage of a particular psutil process without its
children. We use the proportional set size, which accounts for shared pages
to give us a more accurate total usage.'''
assert isinstance(proc, psutil.Process)
try:
return sum([m.pss for m in proc.memory_maps(grouped=True)])
except psutil.AccessDenied:
# If we don't have permission to read a particular process,
# just return 0.
return 0
def get_total_usage(pid):
'''Retrieve the memory usage of a process by PID including its children. We
ignore NoSuchProcess errors to mask subprocesses exiting while the cohort
continues.'''
total = 0
# Fetch parent's usage.
try:
p = psutil.Process(pid)
total += get_usage(p)
children = p.children(recursive=True) #pylint: disable=E1123
except psutil.NoSuchProcess:
return 0
# Fetch usage of children.
for proc in children:
def get_usage(proc):
'''Retrieve the memory usage of a particular psutil process without its
children. We use the proportional set size, which accounts for shared pages
to give us a more accurate total usage.'''
assert isinstance(proc, psutil.Process)
try:
total += get_usage(proc)
except psutil.NoSuchProcess:
pass
return sum([m.pss for m in proc.memory_maps(grouped=True)])
except psutil.AccessDenied:
# If we don't have permission to read a particular process,
# just return 0.
return 0
def get_total_usage(pid):
'''Retrieve the memory usage of a process by PID including its children. We
ignore NoSuchProcess errors to mask subprocesses exiting while the cohort
continues.'''
total = 0
# Fetch parent's usage.
try:
p = psutil.Process(pid)
total += get_usage(p)
children = p.children(recursive=True) # pylint: disable=E1123
except psutil.NoSuchProcess:
return 0
# Fetch usage of children.
for proc in children:
try:
total += get_usage(proc)
except psutil.NoSuchProcess:
pass
return total
return total
class Poller(threading.Thread):
def __init__(self, pid):
@ -106,6 +111,7 @@ class Poller(threading.Thread):
def __exit__(self, *_):
self.finished = True
def process_poller(pid):
'''Initiate polling of a subprocess. This is intended to be used in a
`with` block.'''
@ -118,10 +124,11 @@ def process_poller(pid):
return p
def main():
if len(sys.argv) <= 1 or sys.argv[1] in ['-?', '--help']:
print('Usage: %s command args...\n Measure peak memory ' \
'usage of a command' % sys.argv[0], out=sys.stderr)
print('Usage: %s command args...\n Measure peak memory '
'usage of a command' % sys.argv[0], out=sys.stderr)
return -1
if PSUTIL_NOT_AVAILABLE:
@ -141,7 +148,7 @@ def main():
high = 0
try:
with process_poller(p.pid) as m: #pylint: disable=E1101
with process_poller(p.pid) as m: # pylint: disable=E1101
p.communicate()
high = m.peak_mem_usage()
except KeyboardInterrupt:
@ -152,5 +159,6 @@ def main():
return p.returncode
if __name__ == '__main__':
sys.exit(main())
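
The hunks above mostly re-indent the psutil-backed get_usage/get_total_usage definitions and add the spaces PEP 8 requires around '=' in plain assignments (E225), so PSUTIL_NOT_AVAILABLE=False becomes PSUTIL_NOT_AVAILABLE = False, while keyword arguments keep '=' unspaced. A minimal sketch of both spacings:

# Spaces around "=" in plain assignments (E225)...
PSUTIL_NOT_AVAILABLE = False

# ...but no spaces around "=" when passing keyword arguments (E251).
print("psutil missing:", PSUTIL_NOT_AVAILABLE, sep=" ")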

View File

@ -46,6 +46,7 @@ ANSI_YELLOW = "\033[33m"
ANSI_WHITE = "\033[37m"
ANSI_BOLD = "\033[1m"
def output_color(color, s):
"""Wrap the given string in the given color."""
if sys.stdout.isatty():
@ -53,6 +54,8 @@ def output_color(color, s):
return s
# Find a command in the PATH.
def which(filename):
for path in os.environ["PATH"].split(os.pathsep):
candidate = os.path.join(path, filename)
@ -66,6 +69,8 @@ def which(filename):
# We attempt to handle races where a PID goes away while we
# are looking at it, but not where a PID has been reused.
#
def kill_family(grace_period, parent_pid):
# Find process.
try:
@ -117,7 +122,7 @@ def kill_family(grace_period, parent_pid):
SKIPPED, # Failed dependencies
ERROR, # Failed to run test at all
TIMEOUT, # Wall timeout
CPU_TIMEOUT, # CPU timeout
CPU_TIMEOUT, # CPU timeout
STUCK, # No CPU activity detected
CANCELLED # Cancelled for external reasons
) = range(9)
@ -134,6 +139,7 @@ status_name = ['RUNNING (***bug***)',
'CANCELLED']
status_maxlen = max(len(s) for s in status_name[1:]) + len(" *")
def run_test(test, status_queue, kill_switch,
verbose=False, stuck_timeout=None,
timeout_scale=1.0, timeouts_enabled=True,
@ -180,8 +186,8 @@ def run_test(test, status_queue, kill_switch,
peak_mem_usage = None
try:
process = subprocess.Popen(command,
stdout=output, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
cwd=test.cwd)
stdout=output, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
cwd=test.cwd)
except:
output = "Exception while running test:\n\n%s" % (traceback.format_exc())
if verbose:
@ -235,9 +241,9 @@ def run_test(test, status_queue, kill_switch,
scaled_cpu_timeout = test.cpu_timeout * timeout_scale
with cpuusage.process_poller(process.pid) as c:
# Inactivity timeout
low_cpu_usage = 0.05 # 5%
cpu_history = collections.deque() # sliding window
cpu_usage_total = [0] # workaround for variable scope
low_cpu_usage = 0.05 # 5%
cpu_history = collections.deque() # sliding window
cpu_usage_total = [0] # workaround for variable scope
# Also set a CPU timeout. We poll the cpu usage periodically.
def cpu_timeout():
@ -318,24 +324,29 @@ def run_test(test, status_queue, kill_switch,
'mem_usage': peak_mem_usage})
# run a heuristic script for getting some output on a timeout
def extra_timeout_output(test_name):
# we expect the script to be in the same directory as run_tests.py
here = os.path.dirname(os.path.abspath(__file__))
command = [os.path.join(here, 'timeout_output'), test_name]
try:
process = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(output, _) = process.communicate()
return output.decode('utf8')
except Exception as e:
return ("Exception launching timeout_output: %s" % str(e))
# Print a status line.
def print_test_line_start(test_name):
if sys.stdout.isatty():
print(" Started %-25s " % (test_name + " ..."))
sys.stdout.flush()
def print_test_line(test_name, color, status, real_time=None, cpu_time=None, mem=None):
if mem is not None:
# Report memory usage in gigabytes.
@ -365,6 +376,8 @@ def print_test_line(test_name, color, status, real_time=None, cpu_time=None, mem
#
# Recursive glob
#
def rglob(base_dir, pattern):
matches = []
extras = []
@ -374,11 +387,13 @@ def rglob(base_dir, pattern):
for filename in fnmatch.filter(filenames, 'extra_tests'):
f = os.path.join(root, filename)
extras.extend([os.path.join(root, l.strip())
for l in open(f) if l.strip()])
for l in open(f) if l.strip()])
matches.extend([f for e in extras for f in rglob(e, pattern)])
return sorted(set(matches))
# Print info about tests.
def print_tests(msg, tests, verbose):
if verbose:
print('%d tests %s:' % (len(tests), msg))
@ -389,6 +404,7 @@ def print_tests(msg, tests, verbose):
print(' #> ' + t.command)
print(' -- depends: %s' % list(t.depends))
def print_test_deps(tests):
print('digraph "tests" {')
for t in tests:
@ -399,47 +415,49 @@ def print_test_deps(tests):
#
# Run tests.
#
def main():
# Parse arguments
parser = argparse.ArgumentParser(description="Parallel Regression Framework",
epilog="RUN_TESTS_DEFAULT can be used to overwrite the default set of tests")
parser.add_argument("-d", "--directory", action="store",
metavar="DIR", help="directory to search for test files",
default=os.getcwd())
metavar="DIR", help="directory to search for test files",
default=os.getcwd())
parser.add_argument("--brief", action="store_true",
help="don't print failure logs at end of test run")
help="don't print failure logs at end of test run")
parser.add_argument("-f", "--fail-fast", action="store_true",
help="exit once the first failure is detected")
help="exit once the first failure is detected")
parser.add_argument("-j", "--jobs", type=int, default=1,
help="Number of tests to run in parallel")
help="Number of tests to run in parallel")
parser.add_argument("-l", "--list", action="store_true",
help="list all known tests (-v for details)")
help="list all known tests (-v for details)")
parser.add_argument("-L", "--dry-run", action="store_true",
help="list tests to be run (-v for details)")
help="list tests to be run (-v for details)")
parser.add_argument("--no-dependencies", action="store_true",
help="don't check for dependencies when running specific tests")
help="don't check for dependencies when running specific tests")
parser.add_argument("-x", "--exclude", action="append", metavar="TEST", default=[],
help="exclude the given test; tests depending on it may still run")
help="exclude the given test; tests depending on it may still run")
parser.add_argument("-r", "--remove", action="append", metavar="TEST", default=[],
help="remove the given test and tests that depend on it")
help="remove the given test and tests that depend on it")
parser.add_argument("-v", "--verbose", action="store_true",
help="print test output or list more details")
help="print test output or list more details")
parser.add_argument("--dot", action="store_true",
help="for -l or -L, output test dependencies in GraphViz format")
help="for -l or -L, output test dependencies in GraphViz format")
parser.add_argument("--junit-report", metavar="FILE",
help="write JUnit-style test report")
help="write JUnit-style test report")
parser.add_argument("--stuck-timeout", type=int, default=600, metavar='N',
help="timeout tests if not using CPU for N seconds (default: 600)")
help="timeout tests if not using CPU for N seconds (default: 600)")
timeout_mod_args = parser.add_mutually_exclusive_group()
timeout_mod_args.add_argument("--scale-timeouts", type=float, default=1, metavar='N',
help="multiply test timeouts by N (e.g. 2 provides twice as much time)")
help="multiply test timeouts by N (e.g. 2 provides twice as much time)")
timeout_mod_args.add_argument("--no-timeouts", action="store_true",
help="do not enforce any test timeouts")
help="do not enforce any test timeouts")
parser.add_argument("--grace-period", type=float, default=5, metavar='N',
help="interrupt over-time processes N seconds before killing them (default: 5)")
help="interrupt over-time processes N seconds before killing them (default: 5)")
parser.add_argument("tests", metavar="TESTS",
help="select these tests to run (defaults to all tests)",
nargs="*")
help="select these tests to run (defaults to all tests)",
nargs="*")
args = parser.parse_args()
if args.jobs < 1:
@ -465,11 +483,13 @@ def main():
desired_names = set(args.tests) or set(os.environ.get('RUN_TESTS_DEFAULT', '').split())
bad_names = desired_names - set([t.name for t in all_tests])
if bad_names:
parser.error("These tests are requested, but do not exist: %s" % (", ".join(sorted(bad_names))))
parser.error("These tests are requested, but do not exist: %s" %
(", ".join(sorted(bad_names))))
def get_tests(names):
'''Given a set of names, return the corresponding set of Tests.'''
return {t for t in all_tests if t.name in names}
def add_deps(x):
'''Given a set of Tests, add all dependencies to it.'''
x.update({t for w in x for t in add_deps(get_tests(w.depends))})
@ -508,7 +528,8 @@ def main():
bad_names = set.union(exclude_tests, set(args.remove)) - {t.name for t in all_tests}
if bad_names:
sys.stderr.write("Warning: These tests are excluded/removed, but do not exist: %s\n" % (", ".join(sorted(bad_names))))
sys.stderr.write("Warning: These tests are excluded/removed, but do not exist: %s\n" %
(", ".join(sorted(bad_names))))
if args.dry_run:
if args.dot:
@ -534,6 +555,7 @@ def main():
# current jobs on the bottom line of the tty.
# We cache this status line to help us wipe it later.
tty_status_line = [""]
def wipe_tty_status():
if tty_status_line[0]:
print(" " * len(tty_status_line[0]) + "\r", end="")
@ -584,7 +606,7 @@ def main():
# Wait for jobs to complete.
try:
while True:
info = status_queue.get(block=True, timeout=0.1337) # Built-in pause
info = status_queue.get(block=True, timeout=0.1337) # Built-in pause
name, status = info['name'], info['status']
test_results[name] = info
@ -615,6 +637,7 @@ def main():
# Print failure summaries unless requested not to.
if not args.brief and len(failed_tests) > 0:
LINE_LIMIT = 40
def print_line():
print("".join(["-" for x in range(72)]))
print("")
@ -677,8 +700,8 @@ def main():
# Print summary.
print(("\n\n"
+ output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
% (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
+ output_color(ANSI_WHITE, "%d/%d tests succeeded.") + "\n")
% (len(tests_to_run) - len(failed_tests), len(tests_to_run)))
if len(failed_tests) > 0:
print(output_color(ANSI_RED, "Tests failed.") + "\n")
if kill_switch.is_set():

View File

@ -15,9 +15,11 @@ from lxml import etree
REGRESSION_DIR = os.path.dirname(os.path.realpath(__file__))
REGRESSION_DTD = os.path.join(REGRESSION_DIR, "regression.dtd")
class TestSpecParseException(Exception):
pass
class TestEnv(object):
def __init__(self, base_dir):
@ -44,6 +46,7 @@ class TestEnv(object):
setattr(new_env, name, new)
return new_env
def parse_attributes(tag, env):
"""Parse attributes such as "timeout" in the given XML tag,
returning an updated env to reflect them."""
@ -52,7 +55,8 @@ def parse_attributes(tag, env):
def parse_attr(name, xml_name, cast):
value = tag.get(xml_name)
if value is not None: updates[name] = cast(value)
if value is not None:
updates[name] = cast(value)
parse_attr("cwd", "cwd", str)
parse_attr("timeout", "timeout", int)
@ -61,6 +65,7 @@ def parse_attributes(tag, env):
return env.update(updates) if updates else env
class Test(object):
__slots__ = (
@ -77,13 +82,16 @@ class Test(object):
self.cpu_timeout = env.cpu_timeout
self.depends = env.depends
def parse_test(doc, env):
test = Test(doc.get("name"), doc.text.strip(), env)
return [test]
def tests_names(tests):
return [t.name for t in tests]
def parse_sequence(doc, env):
tests = []
@ -94,6 +102,7 @@ def parse_sequence(doc, env):
return tests
def parse_set(doc, env):
tests = []
@ -102,6 +111,7 @@ def parse_set(doc, env):
return tests
parsers = {
'testsuite': parse_set,
'set': parse_set,
@ -109,6 +119,7 @@ parsers = {
'test': parse_test
}
def parse_tag(doc, env):
try:
parser = parsers[doc.tag]
@ -116,6 +127,7 @@ def parse_tag(doc, env):
raise TestSpecParseException("Unknown tag '%s'" % doc.tag)
return parser(doc, parse_attributes(doc, env))
def validate_xml(doc, filename):
"""Ensure the XML matches the regression DTD."""
@ -126,8 +138,9 @@ def validate_xml(doc, filename):
# Parse the file, and validate against the DTD.
if not dtd.validate(doc):
raise Exception(
"%s does not validate against DTD:\n\n" % filename
+ str(dtd.error_log))
"%s does not validate against DTD:\n\n" % filename
+ str(dtd.error_log))
def parse_testsuite_xml(filename):
# Parse the file.
@ -144,6 +157,7 @@ def parse_testsuite_xml(filename):
# Returns a list of all tests found in this file.
return parse_tag(doc.getroot(), env)
def parse_test_files(xml_files):
tests = []
for x in xml_files:
@ -154,15 +168,19 @@ def parse_test_files(xml_files):
raise
return tests
def show_names(names):
return ' '.join(sorted(names))
def check_tests(tests):
# Check that test names are unique.
names, dups = set(), set()
for n in tests_names(tests):
if n in names: dups.add(n)
else: names.add(n)
if n in names:
dups.add(n)
else:
names.add(n)
if dups:
raise TestSpecParseException(
"Duplicate test names: %s" % show_names(dups))
@ -173,11 +191,13 @@ def check_tests(tests):
raise TestSpecParseException(
"Invalid dependencies: %s" % show_names(bad_depends))
def step_rel(rel):
# From a one-step relation represented as a dictionary,
# generate the corresponding one-or-two-step relation.
return dict((s, rel[s].union(*(rel[t] for t in rel[s]))) for s in rel)
def trans_depends(rel):
# Repeatedly add dependencies of dependencies until convergence.
rel_t = step_rel(rel)
@ -185,13 +205,17 @@ def trans_depends(rel):
rel, rel_t = rel_t, step_rel(rel_t)
return rel_t
def refl_depends(rel):
rel_r = {}
for t in rel: rel_r[t] = rel[t] | {t}
for t in rel:
rel_r[t] = rel[t] | {t}
return rel_r
class Depends(object):
__slots__ = 'step', 'trans', 'rtrans'
def __init__(self, step):
trans = trans_depends(step)
rtrans = refl_depends(trans)
@ -202,14 +226,17 @@ class Depends(object):
# Allow the user to customise handling of missing keys,
# but by default, raise the appropriate KeyError.
def result(x, fail=lambda x: rel[x]):
if x in rel: return rel[x]
else: return fail(x)
if x in rel:
return rel[x]
else:
return fail(x)
return result
self.step = lookup(step)
self.trans = lookup(trans)
self.rtrans = lookup(rtrans)
def collect_dependencies(tests):
forward, reverse = {}, {}
for t in tests:
@ -217,6 +244,7 @@ def collect_dependencies(tests):
reverse[t.name] = frozenset(r.name for r in tests if t.name in r.depends)
return Depends(forward), Depends(reverse)
def toposort(keys, forward_depends, reverse_depends):
"""Topological sort.
@ -253,14 +281,17 @@ def toposort(keys, forward_depends, reverse_depends):
return result
class TestInfo(object):
__slots__ = 'tests', 'tests_by_name', 'forward_deps', 'reverse_deps'
def __init__(self, tests, tests_by_name, forward_deps, reverse_deps):
self.tests = tests
self.tests_by_name = tests_by_name
self.forward_deps = forward_deps
self.reverse_deps = reverse_deps
def process_tests(tests):
"""Given a list of tests (possibly from multiple XML file), check for
errors and return a list of tests in dependency-satisfying order."""
@ -292,14 +323,16 @@ def process_tests(tests):
return TestInfo(tests, tests_by_name, forward_deps, reverse_deps)
def process_test_files(xml_files):
return process_tests(parse_test_files(xml_files))
def main():
# Parse arguments
parser = argparse.ArgumentParser(description="Regression Framework Testspec Parser")
parser.add_argument("file", metavar="FILE", type=str, nargs="*",
help="a regression XML file to parse")
help="a regression XML file to parse")
args = parser.parse_args()
# Ensure we have at least one file.
@ -314,5 +347,6 @@ def main():
print("\"%s\" [timeout=%d, cpu-timeout=%g, parents=%s, cwd=%s]" % (
test.command, test.timeout, test.cpu_timeout, ",".join(test.depends), test.cwd))
if __name__ == "__main__":
main()
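
Several hunks above unfold one-line compound statements such as "if value is not None: updates[name] = cast(value)" and "if n in names: dups.add(n)" so the body sits on its own indented line (E701). A self-contained sketch of the duplicate-name check in that style, with a made-up input list:

# Statement bodies go on their own line rather than after the ":" (E701).
names, dups = set(), set()
for n in ["a", "b", "a"]:
    if n in names:
        dups.add(n)
    else:
        names.add(n)
print(sorted(dups))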

View File

@ -17,23 +17,23 @@ import glob
# Parse arguments
parser = optparse.OptionParser()
parser.add_option("-o", "--output", dest="output",
help="output file", metavar="FILE")
help="output file", metavar="FILE")
parser.add_option("-i", "--input-dir", dest="input_dir",
help="input directory", action="append", default=[],
metavar="DIR")
help="input directory", action="append", default=[],
metavar="DIR")
parser.add_option("-s", "--session-name", dest="session_name",
help="isabelle session name", metavar="NAME")
help="isabelle session name", metavar="NAME")
parser.add_option("-b", "--base-session", dest="base_session",
help="isabelle base session", metavar="NAME")
help="isabelle base session", metavar="NAME")
parser.add_option("-d", "--named-session-dependency", dest="session_dependencies",
help="additional named session dependency", action="append", default=[],
metavar="NAME")
help="additional named session dependency", action="append", default=[],
metavar="NAME")
parser.add_option("-q", "--quick-and-dirty", dest="quick_and_dirty",
help="ROOT file should compile with \"quick and dirty\" enabled.",
action="store_true", default=False)
help="ROOT file should compile with \"quick and dirty\" enabled.",
action="store_true", default=False)
parser.add_option("-T", "--generate-theory", dest="theory_file",
action="store_true", default=False,
help="Generate a theory file instead of a ROOT file.")
action="store_true", default=False,
help="Generate a theory file instead of a ROOT file.")
(options, args) = parser.parse_args()
# Check arguments
@ -58,6 +58,7 @@ with open(options.output, "w") as output:
if options.theory_file:
session_name = os.path.splitext(os.path.basename(options.output))[0]
def import_name(file):
return os.path.splitext(os.path.relpath(file, os.path.dirname(options.output)))[0]

View File

@ -5,55 +5,60 @@
# SPDX-License-Identifier: BSD-2-Clause
#
import sys, re
import sys
import re
def strip_comments(s):
s2 = ''
d = 0
quote = False
i, l = 0, len(s)
while i < l:
if quote == False and s[i:i+2] == '(*':
d += 1
i += 2
elif quote == False and s[i:i+2] == '*)' and d > 0:
d -= 1
i += 2
else:
if d == 0:
s2 += s[i]
if quote == s[i]:
quote = False
elif s[i] in ['"', "'"]:
quote = s[i]
i += 1
return s2
s2 = ''
d = 0
quote = False
i, l = 0, len(s)
while i < l:
if quote == False and s[i:i+2] == '(*':
d += 1
i += 2
elif quote == False and s[i:i+2] == '*)' and d > 0:
d -= 1
i += 2
else:
if d == 0:
s2 += s[i]
if quote == s[i]:
quote = False
elif s[i] in ['"', "'"]:
quote = s[i]
i += 1
return s2
def unquote(s):
if s[:1] == '"' and s[-1:] == '"':
return s[1:-1]
return s
if s[:1] == '"' and s[-1:] == '"':
return s[1:-1]
return s
def get(dir):
sessions = []
try:
root = strip_comments(''.join(open(dir + '/ROOT').readlines()))
sessions += [unquote(s) for s in re.findall('session\s+("[^"]*"|\S+)', root)]
except IOError:
pass
try:
roots = [l.strip() for l in open(dir + '/ROOTS').readlines() if l.strip()[:1] != '#']
for dir2 in roots:
sessions += get(dir + '/' + dir2)
except IOError:
pass
return sessions
sessions = []
try:
root = strip_comments(''.join(open(dir + '/ROOT').readlines()))
sessions += [unquote(s) for s in re.findall('session\s+("[^"]*"|\S+)', root)]
except IOError:
pass
try:
roots = [l.strip() for l in open(dir + '/ROOTS').readlines() if l.strip()[:1] != '#']
for dir2 in roots:
sessions += get(dir + '/' + dir2)
except IOError:
pass
return sessions
if '-h' in sys.argv or '--help' in sys.argv:
print('Usage: %s DIRS...' % sys.argv[0])
print('Print Isabelle session names defined in DIRS.')
print('Usage: %s DIRS...' % sys.argv[0])
print('Print Isabelle session names defined in DIRS.')
else:
sessions = []
for dir in sys.argv[1:]:
sessions += get(dir)
print('\n'.join(sessions))
sessions = []
for dir in sys.argv[1:]:
sessions += get(dir)
print('\n'.join(sessions))

View File

@ -18,6 +18,8 @@ import tempfile
import shutil
# Create a temporary directory
class TempDir(object):
def __enter__(self, cleanup=True):
self.filename = tempfile.mkdtemp()
@ -29,14 +31,15 @@ class TempDir(object):
shutil.rmtree(self.filename)
return False
parser = argparse.ArgumentParser(
description="Generate a 'umm_types.txt' file from the C parser, required by the bitfield generator.")
description="Generate a 'umm_types.txt' file from the C parser, required by the bitfield generator.")
parser.add_argument('input', metavar='INPUT', type=str,
help="C file to parse")
help="C file to parse")
parser.add_argument('output', metavar='OUTPUT', type=str,
help="output filename")
help="output filename")
parser.add_argument('--root', metavar='ROOT', type=str,
help="add Isabelle ROOT or ROOTS file path", action='append')
help="add Isabelle ROOT or ROOTS file path", action='append')
args = parser.parse_args()
if "ISABELLE_PROCESS" not in os.environ or "ISABELLE_TOOL" not in os.environ:
@ -61,9 +64,9 @@ session UmmTypes = CParser +
# Create a new theory file and ROOT file in a temporary directory.
with TempDir() as tmp_dir:
filenames = {
"input": os.path.abspath(args.input),
"output": os.path.abspath(args.output),
}
"input": os.path.abspath(args.input),
"output": os.path.abspath(args.output),
}
with open(os.path.join(tmp_dir, "UmmTypesFile.thy"), "w") as f:
f.write(THY_DATA % filenames)
with open(os.path.join(tmp_dir, "ROOT"), "w") as f:
@ -78,8 +81,7 @@ with TempDir() as tmp_dir:
return result
print "\nGenerating umm_types data file...\n"
subprocess.check_call([
os.environ["ISABELLE_TOOL"], "build", "-c"]
+ interleave("-d", [os.path.abspath(x) for x in args.root])
+ ["-d", ".", "-v", "-b", "UmmTypes"],
cwd=tmp_dir)
os.environ["ISABELLE_TOOL"], "build", "-c"]
+ interleave("-d", [os.path.abspath(x) for x in args.root])
+ ["-d", ".", "-v", "-b", "UmmTypes"],
cwd=tmp_dir)

View File

@ -17,6 +17,8 @@ import tempfile
import time
# Create a temporary directory
class TempDir():
def __init__(self, cleanup=True):
self.cleanup = cleanup
@ -30,6 +32,7 @@ class TempDir():
shutil.rmtree(self.filename)
return False
def mkdir_p(path):
try:
os.makedirs(path)
@ -39,6 +42,7 @@ def mkdir_p(path):
else:
raise
def rglob(base_path, pattern):
"""Recursively find files matching glob pattern 'pattern', from base
directory 'base_path'."""
@ -48,6 +52,7 @@ def rglob(base_path, pattern):
results.append(os.path.join(root, filename))
return results
def read_manifest(filename, base):
"""Read the files described in a MANIFEST file, which has the form:
@ -73,6 +78,7 @@ def read_manifest(filename, base):
results += [(base_dir, x) for x in g]
return results
def copy_manifest(output_dir, manifest_file, manifest_base, target):
"""
Given a manifest file "manifest_file" and a base directory "manifest_base"
@ -95,30 +101,31 @@ def copy_manifest(output_dir, manifest_file, manifest_base, target):
shutil.copyfile(src, dst)
return all_files
# Check usage.
parser = argparse.ArgumentParser(
description='Generate autocorres release tarball.')
description='Generate autocorres release tarball.')
parser.add_argument('version', metavar='VERSION',
type=str, help='Version number of the release, such as "0.95-beta1".')
type=str, help='Version number of the release, such as "0.95-beta1".')
parser.add_argument('cparser_tar', metavar='CPARSER_RELEASE',
type=str, help='Tarball to the C parser release.')
type=str, help='Tarball to the C parser release.')
parser.add_argument('isabelle_tar', metavar='ISABELLE_TARBALL',
type=str, help='Tarball to the official Isabelle release')
type=str, help='Tarball to the official Isabelle release')
parser.add_argument('-b', '--browse', action='store_true',
help='Open shell to browse output prior to tar\'ing.')
help='Open shell to browse output prior to tar\'ing.')
parser.add_argument('-o', '--output', metavar='FILENAME',
default=None, help='Output filename. Defaults to "autocorres-<VERSION>.tar.gz".')
default=None, help='Output filename. Defaults to "autocorres-<VERSION>.tar.gz".')
parser.add_argument('--dry-run', action='store_true',
help='Do not output any files.', default=False)
help='Do not output any files.', default=False)
parser.add_argument('-t', '--test', action='store_true', default=False,
help='Test the archive.')
help='Test the archive.')
parser.add_argument('--no-cleanup', action='store_true',
help='Don''t delete temporary directories.', default=False)
help='Don''t delete temporary directories.', default=False)
parser.add_argument('-r', '--repository', metavar='REPO',
type=str, help='Path to the L4.verified repository base.', default=None)
type=str, help='Path to the L4.verified repository base.', default=None)
parser.add_argument('--archs', metavar='ARCH,...',
type=str, default='ARM,ARM_HYP,X64',
help='L4V_ARCHs to include (comma-separated)')
type=str, default='ARM,ARM_HYP,X64',
help='L4V_ARCHs to include (comma-separated)')
args = parser.parse_args()
args.archs = args.archs.split(',')
@ -191,29 +198,29 @@ with TempDir(cleanup=(not args.no_cleanup)) as base_dir:
# Copy various other files.
shutil.copyfile(
os.path.join(args.repository, 'lib', 'Word_Lib', 'ROOT'),
os.path.join(target_dir, 'lib', 'Word_Lib', 'ROOT'))
os.path.join(args.repository, 'lib', 'Word_Lib', 'ROOT'),
os.path.join(target_dir, 'lib', 'Word_Lib', 'ROOT'))
shutil.copyfile(
os.path.join(release_files_dir, "ROOT.release"),
os.path.join(target_dir, "autocorres", "ROOT"))
os.path.join(release_files_dir, "ROOT.release"),
os.path.join(target_dir, "autocorres", "ROOT"))
shutil.copyfile(
os.path.join(release_files_dir, "ROOTS.base_dir"),
os.path.join(target_dir, "ROOTS"))
os.path.join(release_files_dir, "ROOTS.base_dir"),
os.path.join(target_dir, "ROOTS"))
for i in ["README", "ChangeLog"]:
shutil.copyfile(
os.path.join(release_files_dir, i),
os.path.join(target_dir, i))
os.path.join(release_files_dir, i),
os.path.join(target_dir, i))
shutil.copyfile(
os.path.join(release_files_dir, "CONTRIBUTORS"),
os.path.join(target_dir, "autocorres", "CONTRIBUTORS"))
os.path.join(release_files_dir, "CONTRIBUTORS"),
os.path.join(target_dir, "autocorres", "CONTRIBUTORS"))
# License files.
shutil.copyfile(
os.path.join(args.repository, "LICENSE_BSD2.txt"),
os.path.join(target_dir, "LICENSE_BSD2.txt"))
os.path.join(args.repository, "LICENSE_BSD2.txt"),
os.path.join(target_dir, "LICENSE_BSD2.txt"))
shutil.copyfile(
os.path.join(args.repository, "LICENSE_GPLv2.txt"),
os.path.join(target_dir, "LICENSE_GPLv2.txt"))
os.path.join(args.repository, "LICENSE_GPLv2.txt"),
os.path.join(target_dir, "LICENSE_GPLv2.txt"))
# Extract dependent sessions in lib. FIXME: rather kludgy
print('Extracting sessions from lib/ROOT...')
@ -266,7 +273,7 @@ end
'''.strip() % (base_name,
os.path.basename(c_file),
os.path.basename(c_file)
))
))
for f in glob.glob(os.path.join(target_dir, "autocorres", "tests", "parse-tests", "*.c")):
gen_thy_file(f)
@ -277,7 +284,7 @@ end
"-i", os.path.join(target_dir, "autocorres", "tests", "parse-tests"),
"-i", os.path.join(target_dir, "autocorres", "tests", "proof-tests"),
"-i", os.path.join(target_dir, "autocorres", "tests", "examples"),
])
])
# Update include paths: change "../../lib" to "../lib".
def inplace_replace_string(filename, old_string, new_string):
@ -295,14 +302,19 @@ end
print("Extracting C parser...")
# We want to mix the C parser directory structure around a little.
with TempDir() as c_parser_working_dir:
subprocess.check_call(["tar", "-xz", "-C", c_parser_working_dir, "--strip-components=1", "-f", args.cparser_tar])
subprocess.check_call(["tar", "-xz", "-C", c_parser_working_dir,
"--strip-components=1", "-f", args.cparser_tar])
# The C parser uses mllex and mlyacc to generate its grammar. We build
# the grammar files so that our release won't have a dependency on mlton.
print("Generating C parser grammar files...")
subprocess.check_call(['make', 'c-parser-deps'], cwd=os.path.join(c_parser_working_dir, "src", "c-parser"))
shutil.move(os.path.join(c_parser_working_dir, "src", "c-parser"), os.path.join(target_dir, "c-parser"))
shutil.move(os.path.join(c_parser_working_dir, "README.md"), os.path.join(target_dir, "c-parser", "README.md"))
shutil.move(os.path.join(c_parser_working_dir, "doc", "ctranslation.pdf"), os.path.join(target_dir, "c-parser", "doc", "ctranslation.pdf"))
subprocess.check_call(['make', 'c-parser-deps'],
cwd=os.path.join(c_parser_working_dir, "src", "c-parser"))
shutil.move(os.path.join(c_parser_working_dir, "src", "c-parser"),
os.path.join(target_dir, "c-parser"))
shutil.move(os.path.join(c_parser_working_dir, "README.md"),
os.path.join(target_dir, "c-parser", "README.md"))
shutil.move(os.path.join(c_parser_working_dir, "doc", "ctranslation.pdf"),
os.path.join(target_dir, "c-parser", "doc", "ctranslation.pdf"))
# Double-check our theory dependencies.
if os.path.exists(thydeps_tool):
@ -345,7 +357,8 @@ end
print("Extracting Isabelle...")
base_isabelle_dir = os.path.join(base_dir, "isabelle")
mkdir_p(base_isabelle_dir)
subprocess.check_call(["tar", "-xz", "-C", base_isabelle_dir, "--strip-components=1", "-f", args.isabelle_tar])
subprocess.check_call(["tar", "-xz", "-C", base_isabelle_dir,
"--strip-components=1", "-f", args.isabelle_tar])
base_isabelle_bin = os.path.join(base_isabelle_dir, "bin", "isabelle")
assert os.path.exists(base_isabelle_bin)
@ -358,7 +371,8 @@ end
# Build the docs.
try:
subprocess.check_call(
[isabelle_bin, "build", "-c", "-d", ".", "-d", "./autocorres/doc/quickstart", "-v", "AutoCorresQuickstart"],
[isabelle_bin, "build", "-c", "-d", ".", "-d",
"./autocorres/doc/quickstart", "-v", "AutoCorresQuickstart"],
cwd=os.path.join(doc_build_dir, "doc"), env=dict(os.environ, L4V_ARCH="ARM"))
except subprocess.CalledProcessError:
print("Building documentation failed.")
@ -367,8 +381,9 @@ end
# Copy the generated PDF into our output.
shutil.copyfile(
os.path.join(doc_build_dir, "doc", "autocorres", "doc", "quickstart", "output", "document.pdf"),
os.path.join(tree, "quickstart.pdf"))
os.path.join(doc_build_dir, "doc", "autocorres", "doc",
"quickstart", "output", "document.pdf"),
os.path.join(tree, "quickstart.pdf"))
print("Building documentation...")
build_docs(target_dir, base_isabelle_bin)
@ -376,8 +391,8 @@ end
if args.output != None:
print("Creating tarball...")
subprocess.check_call(["tar", "-cz", "--numeric-owner",
"--owner", "nobody", "--group", "nogroup",
"-C", base_dir, "-f", args.output, target_dir_name])
"--owner", "nobody", "--group", "nogroup",
"-C", base_dir, "-f", args.output, target_dir_name])
# Run a test if requested.
if args.test:
@ -397,4 +412,3 @@ end
if args.browse:
print("Opening shell...")
subprocess.call(user_shell, cwd=target_dir)
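
Most of the hunks above wrap long subprocess.check_call(...), shutil.move(...) and shutil.copyfile(...) calls so each line fits the PEP 8 length limit (E501), with continuation lines aligned inside the opening bracket. A harmless runnable sketch of that wrapping; 'echo' stands in for the tar and isabelle commands used above:

import subprocess

# Long argument lists wrap with continuation lines aligned inside the bracket.
subprocess.check_call(["echo", "tar", "-cz", "--numeric-owner",
                       "--owner", "nobody", "--group", "nogroup",
                       "-C", "/tmp", "-f", "example.tar.gz"])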

View File

@ -18,6 +18,8 @@ import re
import numpy
# Create a temporary directory
class TempDir(object):
def __enter__(self, cleanup=True):
self.filename = tempfile.mkdtemp()
@ -29,6 +31,7 @@ class TempDir(object):
shutil.rmtree(self.filename)
return False
def mkdir_p(path):
try:
os.makedirs(path)
@ -38,23 +41,25 @@ def mkdir_p(path):
else:
raise
def phase_sort_key(x):
d = {
"CParser" : 1,
"L1" : 2,
"L1except" : 3,
"L1peep" : 4,
"L2" : 5,
"L2simp" : 6,
"HL" : 7,
"HLsimp" : 8,
"TS" : 9,
"polish" : 10,
}
"CParser": 1,
"L1": 2,
"L1except": 3,
"L1peep": 4,
"L2": 5,
"L2simp": 6,
"HL": 7,
"HLsimp": 8,
"TS": 9,
"polish": 10,
}
if x in d:
return (d[x], x)
return (100, x)
def write_theory_file(f, enable_stats):
f.write("""
theory Stats
@ -99,26 +104,27 @@ def write_theory_file(f, enable_stats):
end
""" % {
"time" : str(datetime.datetime.now()),
"enable_stats" : "true" if enable_stats else "false",
"time": str(datetime.datetime.now()),
"enable_stats": "true" if enable_stats else "false",
})
# Check usage.
parser = argparse.ArgumentParser(
description='Generate statistics about AutoCorres and the C Parser.')
description='Generate statistics about AutoCorres and the C Parser.')
parser.add_argument('input', metavar='INPUT',
type=str, help='Input C source file.')
type=str, help='Input C source file.')
parser.add_argument('-n', '--no-isabelle',
default=False, action='store_true',
help="Don't run Isabelle, but reuse previous log file.")
default=False, action='store_true',
help="Don't run Isabelle, but reuse previous log file.")
parser.add_argument('-b', '--browse', default=False, action='store_true',
help='Open shell in temp directory.')
help='Open shell in temp directory.')
parser.add_argument('-r', '--root', action='store', type=str,
help='Root to l4.verified directory.', default=None)
help='Root to l4.verified directory.', default=None)
parser.add_argument('-R', '--repeats', action='store',
help='Number of runs for timing data.', default=1)
help='Number of runs for timing data.', default=1)
parser.add_argument('-o', '--output', metavar='OUTPUT',
default="/dev/stdout", type=str, help='Output file.')
default="/dev/stdout", type=str, help='Output file.')
args = parser.parse_args()
if args.root == None and not args.no_isabelle:
@ -136,7 +142,8 @@ with TempDir() as tmp_dir:
shutil.copyfile(args.input, os.path.join(tmp_dir, "input.c"))
# Get lines of code.
lines_of_code = int(subprocess.check_output(["c_count", args.input]).strip().split("\n")[-1])
lines_of_code = int(subprocess.check_output(
["c_count", args.input]).strip().split("\n")[-1])
# Generate a root file.
with open(os.path.join(tmp_dir, "ROOT"), "w") as f:
@ -156,10 +163,12 @@ with TempDir() as tmp_dir:
# Process theory file with Isabelle.
if not args.no_isabelle:
subprocess.check_call(["isabelle", "build", "-v", "-d", args.root, "-d", ".", "-c", "Stats"], cwd=tmp_dir)
subprocess.check_call(["isabelle", "build", "-v", "-d", args.root,
"-d", ".", "-c", "Stats"], cwd=tmp_dir)
# Fetch log file.
log_data = subprocess.check_output(["isabelle", "env", "sh", "-c" ,"zcat ${ISABELLE_OUTPUT}/log/Stats.gz"])
log_data = subprocess.check_output(
["isabelle", "env", "sh", "-c", "zcat ${ISABELLE_OUTPUT}/log/Stats.gz"])
phase_lines_of_spec = {}
phase_term_size = {}
phase_num_functions = {}
@ -187,8 +196,9 @@ with TempDir() as tmp_dir:
output.write("\n")
output.write("%-10s %10s %10s\n" % ("Phase", "LoS", "Term Size"))
for phase in sorted(phase_term_size.keys(),key=phase_sort_key):
output.write("%-10s %10d %10d\n" % (phase, phase_lines_of_spec[phase], phase_term_size[phase] / phase_num_functions[phase]))
for phase in sorted(phase_term_size.keys(), key=phase_sort_key):
output.write("%-10s %10d %10d\n" % (phase,
phase_lines_of_spec[phase], phase_term_size[phase] / phase_num_functions[phase]))
output.write("\n")
#
@ -206,10 +216,12 @@ with TempDir() as tmp_dir:
ac_real_time = []
for i in xrange(int(args.repeats)):
if not args.no_isabelle:
subprocess.check_call(["isabelle", "build", "-v", "-o", "threads=1", "-d", args.root, "-d", ".", "-c", "Stats"], cwd=tmp_dir)
subprocess.check_call(["isabelle", "build", "-v", "-o", "threads=1",
"-d", args.root, "-d", ".", "-c", "Stats"], cwd=tmp_dir)
# Fetch log file.
log_data = subprocess.check_output(["isabelle", "env", "sh", "-c" ,"zcat ${ISABELLE_OUTPUT}/log/Stats.gz"])
log_data = subprocess.check_output(
["isabelle", "env", "sh", "-c", "zcat ${ISABELLE_OUTPUT}/log/Stats.gz"])
for line in log_data.replace("\r", "").split("\n"):
if line.startswith("C Parser CPU Time"):
c_parser_cpu_time.append(int(line.split(":")[1]))
@ -226,7 +238,7 @@ with TempDir() as tmp_dir:
("C Parser Real Time", c_parser_real_time),
("AutoCorres CPU Time", ac_cpu_time),
("AutoCorres Real Time", ac_real_time),
]:
]:
output.write("%s: %.2f (%.2f)\n" % (name, numpy.mean(values), numpy.std(values)))
output.write(" samples: " + (", ".join([str(x) for x in values])) + "\n")
@ -240,4 +252,3 @@ with TempDir() as tmp_dir:
if args.browse:
subprocess.call("zsh", cwd=tmp_dir)
raise
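
In the phase_sort_key table above the content is unchanged; the edit only drops the space before ':' in each key (E203) and re-indents the literal and its closing brace. A sketch under the same convention, using a shortened copy of that table:

# No whitespace before ":" in dict entries (E203).
phase_order = {
    "CParser": 1,
    "L1": 2,
    "polish": 10,
}
print(sorted(phase_order, key=phase_order.get))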

View File

@ -12,11 +12,13 @@ import locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF8')
def fetch_match(str, r):
m = re.search(r, str)
assert m != None
return m.group(1)
for i in sys.argv[1:]:
with open(i) as f:
data = "\n".join(f.readlines())
@ -33,7 +35,6 @@ for i in sys.argv[1:]:
c_secs = int(c_time) / 1000.0
ac_secs = int(ac_time) / 1000.0
c_los = intc(fetch_match(data, "CParser +([0-9]+)"))
ac_los = intc(fetch_match(data, "polish +([0-9]+)"))
@ -41,16 +42,16 @@ for i in sys.argv[1:]:
ac_termsize = intc(fetch_match(data, "polish +[0-9]+ +([0-9]+)"))
project = {
"rtos" : "eChronos",
"schorr_waite" : "Schorr-Waite",
"sel4" : "seL4 kernel",
"sysinit" : "CapDL SysInit",
"piccolo" : "Piccolo kernel",
}[project]
"rtos": "eChronos",
"schorr_waite": "Schorr-Waite",
"sel4": "seL4 kernel",
"sysinit": "CapDL SysInit",
"piccolo": "Piccolo kernel",
}[project]
print (" {project:<20} & {loc:>7} & {functions:>4} & "
+ "{c_secs:8.1f} & {ac_secs:8.1f} & "
+ "{c_los:>8} & {ac_los:>8} & "
print(" {project:<20} & {loc:>7} & {functions:>4} & "
+ "{c_secs:8.1f} & {ac_secs:8.1f} & "
+ "{c_los:>8} & {ac_los:>8} & "
+ "{c_termsize:>5} & {ac_termsize:>5} \\\\").format(grouping=True, **locals())
c_los_raw = int(fetch_match(data, "CParser +([0-9]+)"))
@ -59,6 +60,5 @@ for i in sys.argv[1:]:
c_termsize_raw = int(fetch_match(data, "CParser +[0-9]+ +([0-9]+)"))
ac_termsize_raw = int(fetch_match(data, "polish +[0-9]+ +([0-9]+)"))
print (" % percent : LoS: {:%} TS: {:%}".format(
float(c_los_raw - ac_los_raw) / c_los_raw, float(c_termsize_raw - ac_termsize_raw) / c_termsize_raw))
print(" % percent : LoS: {:%} TS: {:%}".format(
float(c_los_raw - ac_los_raw) / c_los_raw, float(c_termsize_raw - ac_termsize_raw) / c_termsize_raw))

View File

@ -57,19 +57,21 @@ def parse(call):
return ['%s\n' % line for line in lines]
def settings_line(l):
"""Adjusts some global settings."""
bits = l.split (',')
bits = l.split(',')
for bit in bits:
bit = bit.strip ()
(kind, setting) = bit.split ('=')
kind = kind.strip ()
bit = bit.strip()
(kind, setting) = bit.split('=')
kind = kind.strip()
if kind == 'keep_constructor':
[cons] = setting.split ()
[cons] = setting.split()
keep_conss[cons] = 1
else:
assert not "setting kind understood", bit
def set_global(_call):
global call
call = _call
@ -110,7 +112,7 @@ element matches pred"""
def get_defs(filename):
#if filename in file_defs:
# if filename in file_defs:
# return file_defs[filename]
cmdline = os.environ['L4CPP']
@ -363,10 +365,11 @@ def wrap_qualify(lines, deep=True):
if call.current_context:
lines.insert(0, 'end\nqualify {} (in Arch) {}'.format(call.current_context[-1],
asdfextra))
asdfextra))
lines.append('end_qualify\ncontext Arch begin global_naming %s' % call.current_context[-1])
return lines
def def_lines(d, call):
"""Produces the set of lines associated with a definition."""
if call.all_bits:
@ -554,11 +557,11 @@ def reduce_to_single_line(tree_element):
type_conv_table = {
'Maybe': 'option',
'Bool': 'bool',
'Word': 'machine_word',
'Int': 'nat',
'String': 'unit list'}
'Maybe': 'option',
'Bool': 'bool',
'Word': 'machine_word',
'Int': 'nat',
'String': 'unit list'}
def type_conv(string):
@ -1023,8 +1026,8 @@ def set_instance_proofs(header, constructor_arities, d):
classes = d.deriving
instance_proof_fns = set(
sorted((instance_proof_table[classname] for classname in classes),
key=lambda x: x.order))
sorted((instance_proof_table[classname] for classname in classes),
key=lambda x: x.order))
for f in instance_proof_fns:
(npfs, nexs) = f(header, canonical, d)
pfs.extend(npfs)
@ -1060,6 +1063,7 @@ def finite_instance_proofs(header, cons):
# wondering where the serialisable proofs went? see
# commit 21361f073bbafcfc985934e563868116810d9fa2 for last known occurrence.
# leave type tags 0..11 for explicit use outside of this script
next_type_tag = 12
@ -1226,7 +1230,8 @@ def num_instance_proofs(header, canonical, d):
num_instance_proofs.order = 2
def enum_instance_proofs (header, canonical, d):
def enum_instance_proofs(header, canonical, d):
def singular_canonical():
if len(canonical) == 1:
[(_, (_, n))] = canonical
@ -1242,7 +1247,7 @@ def enum_instance_proofs (header, canonical, d):
if call.current_context:
lines.append('interpretation Arch .')
lines.append('definition')
lines.append(' enum_%s: "enum_class.enum \<equiv> map %s enum"' \
lines.append(' enum_%s: "enum_class.enum \<equiv> map %s enum"'
% (header, cons))
else:
@ -1250,49 +1255,49 @@ def enum_instance_proofs (header, canonical, d):
cons_one_arg = [cons for i, (cons, n) in canonical if n == 1]
cons_two_args = [cons for i, (cons, n) in canonical if n > 1]
assert cons_two_args == []
lines.append ('instantiation %s :: enum begin' % header)
lines.append('instantiation %s :: enum begin' % header)
if call.current_context:
lines.append('interpretation Arch .')
lines.append ('definition')
lines.append (' enum_%s: "enum_class.enum \<equiv> ' % header)
lines.append('definition')
lines.append(' enum_%s: "enum_class.enum \<equiv> ' % header)
if len(cons_no_args) == 0:
lines.append (' []')
lines.append(' []')
else:
lines.append (' [ ')
lines.append(' [ ')
for cons in cons_no_args[:-1]:
lines.append (' %s,' % cons)
lines.append(' %s,' % cons)
for cons in cons_no_args[-1:]:
lines.append (' %s' % cons)
lines.append (' ]')
lines.append(' %s' % cons)
lines.append(' ]')
for cons in cons_one_arg:
lines.append (' @ (map %s enum)' % cons)
lines.append(' @ (map %s enum)' % cons)
lines[-1] = lines[-1] + '"'
lines.append ('')
lines.append('')
for cons in cons_one_arg:
lines.append('lemma %s_map_distinct[simp]: "distinct (map %s enum)"' % (cons, cons))
lines.append(' apply (simp add: distinct_map)')
lines.append(' by (meson injI %s.inject)' % header)
lines.append('')
lines.append('definition')
lines.append(' "enum_class.enum_all (P :: %s \<Rightarrow> bool) \<longleftrightarrow> Ball UNIV P"' \
% header)
lines.append(' "enum_class.enum_all (P :: %s \<Rightarrow> bool) \<longleftrightarrow> Ball UNIV P"'
% header)
lines.append('')
lines.append('definition')
lines.append(' "enum_class.enum_ex (P :: %s \<Rightarrow> bool) \<longleftrightarrow> Bex UNIV P"' \
% header)
lines.append(' "enum_class.enum_ex (P :: %s \<Rightarrow> bool) \<longleftrightarrow> Bex UNIV P"'
% header)
lines.append('')
lines.append(' instance')
lines.append(' apply intro_classes')
lines.append(' apply (safe, simp)')
lines.append(' apply (case_tac x)')
if len(canonical) == 1:
lines.append(' apply (auto simp: enum_%s enum_all_%s_def enum_ex_%s_def' \
% (header, header, header))
lines.append(' apply (auto simp: enum_%s enum_all_%s_def enum_ex_%s_def'
% (header, header, header))
lines.append(' distinct_map_enum)')
lines.append(' done')
else:
lines.append(' apply (simp_all add: enum_%s enum_all_%s_def enum_ex_%s_def)' \
% (header, header, header))
lines.append(' apply (simp_all add: enum_%s enum_all_%s_def enum_ex_%s_def)'
% (header, header, header))
lines.append(' by fast+')
lines.append('end')
lines.append('')
@ -1310,8 +1315,8 @@ def enum_instance_proofs (header, canonical, d):
lines.append('begin')
if call.current_context:
lines.append('interpretation Arch .')
lines.append('instance by (intro_classes, simp add: enum_alt_%s)' \
% header)
lines.append('instance by (intro_classes, simp add: enum_alt_%s)'
% header)
lines.append('end')
lines.append('')
lines.append('(*>*)')
@ -1347,7 +1352,7 @@ no_proofs.order = 6
instance_proof_table = {
'Eq': no_proofs,
'Bounded': no_proofs, #enum_instance_proofs,
'Bounded': no_proofs, # enum_instance_proofs,
'Enum': enum_instance_proofs,
'Ix': no_proofs,
'Ord': no_proofs,
@ -1360,32 +1365,34 @@ instance_proof_table = {
'PSpaceStorable': pspace_storable_instance_proofs,
'Typeable': no_proofs,
'Error': no_proofs,
}
}
def bij_instance (classname, typename, constructor, fns):
def bij_instance(classname, typename, constructor, fns):
L = [
'',
'instance %s :: %s ..' % (typename, classname),
'defs (overloaded)'
]
for (fname, fstr, cast_return) in fns:
n = len (fstr.split('%s')) - 1
n = len(fstr.split('%s')) - 1
names = ('x', 'y', 'z', 'w')[:n]
names2 = tuple ([name + "'" for name in names])
names2 = tuple([name + "'" for name in names])
fstr1 = fstr % names
fstr2 = fstr % names2
L.append (' %s_%s: "%s \<equiv>' % (fname, typename, fstr1))
L.append(' %s_%s: "%s \<equiv>' % (fname, typename, fstr1))
for name in names:
L.append(" case %s of %s %s' \<Rightarrow>" \
% (name, constructor, name))
L.append(" case %s of %s %s' \<Rightarrow>"
% (name, constructor, name))
if cast_return:
L.append (' %s (%s)"' % (constructor, fstr2))
L.append(' %s (%s)"' % (constructor, fstr2))
else:
L.append (' %s"' % fstr2)
L.append(' %s"' % fstr2)
return L
def get_type_map (string):
def get_type_map(string):
"""Takes Haskell named record syntax and produces
a map (in the form of a list of tuples) defining
the converted types of the names."""
@ -1816,7 +1823,7 @@ def do_clause_pattern(line, children, type, n=0):
new_left = '%s:[%s]' % (bits[0], bits[1])
new_line = lead_ws(line) + new_left + ' <- ' + right
extra = []
return do_clause_pattern (new_line, children, type, n + 1) \
return do_clause_pattern(new_line, children, type, n + 1) \
+ extra
for lhs in left_start_table:
if left.startswith(lhs):
@ -2006,8 +2013,8 @@ def case_clauses_transform(xxx_todo_changeme5):
cases.append(case)
bodies.append((tail, c))
cases = tuple(cases) # used as key of a dict later, needs to be hashable
# (since lists are mutable, they aren't)
cases = tuple(cases) # used as key of a dict later, needs to be hashable
# (since lists are mutable, they aren't)
if not cases:
print(line)
conv = get_case_conv(cases)
@ -2414,9 +2421,9 @@ def all_constructor_conv(cases):
return conv
word_getter = re.compile (r"([a-zA-Z0-9]+)")
word_getter = re.compile(r"([a-zA-Z0-9]+)")
record_getter = re.compile (r"([a-zA-Z0-9]+\s*{[a-zA-Z0-9'\s=\,_\(\):\]\[]*})")
record_getter = re.compile(r"([a-zA-Z0-9]+\s*{[a-zA-Z0-9'\s=\,_\(\):\]\[]*})")
def extended_pattern_conv(cases):

View File

@ -42,7 +42,7 @@ for line in instructions:
output_f = open(output_tmp, 'w')
output_f.write('(* THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. *)\n')
output_f.write('(* instead, see the skeleton file %s *)\n' %
os.path.basename(input))
os.path.basename(input))
input_f = open(input)
for line in input_f:
@ -88,10 +88,10 @@ for line in instructions:
pass
else:
output_f.writelines(parsed)
elif line.startswith ("#INCLUDE_SETTINGS"):
(_, settings) = line.split (None, 1)
settings = settings.strip ()
lhs_pars.settings_line (settings)
elif line.startswith("#INCLUDE_SETTINGS"):
(_, settings) = line.split(None, 1)
settings = settings.strip()
lhs_pars.settings_line(settings)
else:
output_f.write(line)
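
The final hunks remove the space PEP 8 forbids between a name and its call parentheses (E211), e.g. line.split (None, 1) becomes line.split(None, 1) and bit.strip () becomes bit.strip(). A last illustrative sketch with a made-up settings line:

# No space between a callable and its argument list (E211).
line = "#INCLUDE_SETTINGS keep_constructor = SomeCons"
(_, settings) = line.split(None, 1)
settings = settings.strip()
print(settings)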