Initial commit of a major revision of this example.

Achim D. Brucker 2019-07-25 10:50:09 +01:00
parent d0306e62bc
commit beeb948c77
10 changed files with 1599 additions and 31 deletions


@@ -6,3 +6,9 @@ session "mini_odo" = "Isabelle_DOF" +
"isadof.cfg"
"preamble.tex"
"build"
"root.bib"
"lstisadof.sty"
"figures/df-numerics-encshaft.png"
"figures/odometer.jpeg"
"figures/three-phase-odo.pdf"
"figures/wheel-df.png"

Binary file not shown (added, 27 KiB)

Binary file not shown (added, 1.4 MiB)

Binary file not shown (added, 23 KiB)


@@ -1,3 +1,3 @@
Template: scrartcl
Ontology: scholarly_paper
Template: scrreprt-modern
Ontology: technical_report
Ontology: cenelec_50128


@@ -0,0 +1,183 @@
\definecolor{OliveGreen} {cmyk}{0.64,0,0.95,0.40}
\definecolor{BrickRed} {cmyk}{0,0.89,0.94,0.28}
\definecolor{Blue} {cmyk}{1,1,0,0}
\definecolor{CornflowerBlue}{cmyk}{0.65,0.13,0,0}
%%%\lst@BeginAspect[keywords]{isar}
\gdef\lst@tagtypes{s}
\gdef\lst@TagKey#1#2{%
\lst@Delim\lst@tagstyle #2\relax
{Tag}\lst@tagtypes #1%
{\lst@BeginTag\lst@EndTag}%
\@@end\@empty{}}
\lst@Key{tag}\relax{\lst@TagKey\@empty{#1}}
\lst@Key{tagstyle}{}{\def\lst@tagstyle{#1}}
\lst@AddToHook{EmptyStyle}{\let\lst@tagstyle\@empty}
\gdef\lst@BeginTag{%
\lst@DelimOpen
\lst@ifextags\else
{\let\lst@ifkeywords\iftrue
\lst@ifmarkfirstintag \lst@firstintagtrue \fi}}
\lst@AddToHookExe{ExcludeDelims}{\let\lst@ifextags\iffalse}
\gdef\lst@EndTag{\lst@DelimClose\lst@ifextags\else}
\lst@Key{usekeywordsintag}t[t]{\lstKV@SetIf{#1}\lst@ifusekeysintag}
\lst@Key{markfirstintag}f[t]{\lstKV@SetIf{#1}\lst@ifmarkfirstintag}
\gdef\lst@firstintagtrue{\global\let\lst@iffirstintag\iftrue}
\global\let\lst@iffirstintag\iffalse
\lst@AddToHook{PostOutput}{\lst@tagresetfirst}
\lst@AddToHook{Output}
{\gdef\lst@tagresetfirst{\global\let\lst@iffirstintag\iffalse}}
\lst@AddToHook{OutputOther}{\gdef\lst@tagresetfirst{}}
\lst@AddToHook{Output}
{\ifnum\lst@mode=\lst@tagmode
\lst@iffirstintag \let\lst@thestyle\lst@gkeywords@sty \fi
\lst@ifusekeysintag\else \let\lst@thestyle\lst@gkeywords@sty\fi
\fi}
\lst@NewMode\lst@tagmode
\gdef\lst@Tag@s#1#2\@empty#3#4#5{%
\lst@CArg #1\relax\lst@DefDelimB {}{}%
{\ifnum\lst@mode=\lst@tagmode \expandafter\@gobblethree \fi}%
#3\lst@tagmode{#5}%
\lst@CArg #2\relax\lst@DefDelimE {}{}{}#4\lst@tagmode}%
\gdef\lst@BeginCDATA#1\@empty{%
\lst@TrackNewLines \lst@PrintToken
\lst@EnterMode\lst@GPmode{}\let\lst@ifmode\iffalse
\lst@mode\lst@tagmode #1\lst@mode\lst@GPmode\relax\lst@modetrue}
%%\lst@EndAspect
% \gdef\lst@BeginTag{%
% \lst@DelimOpen
% \lst@ifextags\else
% {\let\lst@ifkeywords\iftrue
% \lst@ifmarkfirstintag\lst@firstintagtrue\fi\color{green}}}
% \gdef\lst@EndTag{\lst@DelimClose\lst@ifextags\else\color{green}}
\def\beginlstdelim#1#2#3%
{%
\def\endlstdelim{\texttt{\textbf{\color{black!60}#2}}\egroup}%
\ttfamily\textbf{\color{black!60}#1}\bgroup\rmfamily\color{#3}\aftergroup\endlstdelim%
}
\newcommand{\subscr}[1]{\ensuremath{_{\text{#1}}}}
\newcommand{\supscr}[1]{\ensuremath{^{\text{#1}}}}
\lstdefinestyle{ISAR}{%
language=%
,basicstyle=\ttfamily%
,showspaces=false%
,showlines=false%
,columns=flexible%
,keepspaces
,mathescape=false,
,morecomment=[s]{(*}{*)}%
% ,moredelim=*[s][\rmfamily]{\{*}{*\}}%
,moredelim = **[is][\beginlstdelim{\{*}{*\}}{black}]{\{*}{*\}}
,showstringspaces=false%
,moredelim=*[is][\supscr]{<bsup>}{<esup>}%
,moredelim=*[is][\subscr]{<bsub>}{<esub>}%
,literate={%
{...}{\,\ldots\,}3%
{<Open>}{\ensuremath{\isacartoucheopen}}1%
{<open>}{\ensuremath{\isacartoucheopen}}1%
{<@>}{@}1%
{"}{}0%
{~}{\ }1%
{::}{:\!:}1%
{<Close>}{\ensuremath{\isacartoucheclose}}1%
{<close>}{\ensuremath{\isacartoucheclose}}1%
{\\<Gamma>}{\ensuremath{\Gamma}}1%
{\\<times>}{\ensuremath{\times}}1%
{\\<equiv>}{\ensuremath{\equiv}}1%
{\\<Rightarrow>}{\ensuremath{\Rightarrow}}1%
{\\<rightarrow>}{\ensuremath{\rightarrow}}1%
{\\<longrightarrow>}{\ensuremath{\rightarrow}}1%
{\\<and>}{\ensuremath{\land}}1%
{\\<or>}{\ensuremath{\lor}}1%
{\\<lfloor>}{\ensuremath{\lfloor}}1%
{\\<rfloor>}{\ensuremath{\rfloor}}1%
%{\\<lparr>}{\ensuremath{\lparr}}1%
%{\\<rparr>}{\ensuremath{\rparr}}1%
{\\<le>}{\ensuremath{\le}}1%
{\\<delta>}{\ensuremath{\delta}}1%
{\\<lambda>}{\ensuremath{\lambda}}1%
{\\<bar>}{\ensuremath{\vert}}1%
{\<sigma>}{\ensuremath{\sigma}}1%
{\\<lparr>}{\ensuremath{\isasymlparr}}1%
{\\<rparr>}{\ensuremath{\isasymrparr}}1%
{\\<leftrightarrow>}{\ensuremath{\leftrightarrow}}1%
{\{*}{\raise.3ex\hbox{$\scriptscriptstyle\langle$}}1%
{*\}}{\raise.3ex\hbox{$\scriptscriptstyle\rangle$}}1%
{\\<open>}{\raise.3ex\hbox{$\scriptscriptstyle\langle$}}1%
{\\<close>}{\raise.3ex\hbox{$\scriptscriptstyle\rangle$}}1%
{\\<forall>}{\ensuremath{\forall}}1%
{\\<exists>}{\ensuremath{\exists}}1%
{\\<in>}{\ensuremath{\in}}1%
{\\<delta>}{\ensuremath{\delta}}1%
{\\<real>}{\ensuremath{\mathbb{R}}}1%
{\\<noteq>}{\ensuremath{\neq}}1%
{\\<Forall>}{\ensuremath{\bigwedge\,}}1%
{\\<lbrakk>}{\ensuremath{\mathopen{\lbrack\mkern-3mu\lbrack}}}1%
{\\<lbrace>}{\ensuremath{\mathopen{\lbrace\mkern-4.5mu\mid}}}1%
{\\<rbrakk>}{\ensuremath{\mathclose{\rbrack\mkern-3mu\rbrack}}}1%
{\\<rbrace>}{\ensuremath{\mathclose{\mid\mkern-4.5mu\rbrace}}}1%
}%
% % Defining "tags" (text-antiquotations) based on 1-keywords
,tag=**[s]{@\{}{\}}%
,tagstyle=\color{CornflowerBlue}%
,markfirstintag=true%
,keywordstyle=\bfseries%
,keywords={}
% Defining 2-keywords
,keywordstyle=[2]{\color{Blue!60}\bfseries}%
,alsoletter={*,-}
,morekeywords=[2]{case, then, show, theory, begin, end, ML,section,subsection,paragraph,chapter,text}%
%,moredelim=[s][\textit]{<}{>}
% Defining 3-keywords
,keywordstyle=[3]{\color{OliveGreen!60}\bfseries}%
,morekeywords=[3]{doc_class,declare_reference,update_instance*,
open_monitor*, close_monitor*, declare_reference*,section*,text*,title*,abstract*}%
% Defining 4-keywords
,keywordstyle=[4]{\color{black!60}\bfseries}%
,morekeywords=[4]{where, imports, keywords}%
% Defining 5-keywords
,keywordstyle=[5]{\color{BrickRed!70}\bfseries}%
,morekeywords=[5]{datatype, by, fun, Definition*, definition, type_synonym, typedecl, consts, assumes, and, shows, proof, next, qed, lemma, theorem}%
% Defining 6-keywords
,keywordstyle=[6]{\itshape}%
,morekeywords=[6]{meta-args, ref, expr, class_id}%
%
}%
%%
\lstnewenvironment{isar}[1][]{\lstset{style=ISAR,
backgroundcolor=\color{black!2},
frame=lines,mathescape,
basicstyle=\footnotesize\ttfamily,#1}}{}
%%%
\def\inlineisar{\lstinline[style=ISAR,breaklines=true,mathescape,breakatwhitespace=true]}
\lstnewenvironment{out}[1][]{\lstset{
backgroundcolor=\color{green!2},
frame=lines,mathescape,breakatwhitespace=true
,columns=flexible%
,basicstyle=\footnotesize\rmfamily,#1}}{}
%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%
\lstloadlanguages{ML}
\lstdefinestyle{sml}{basicstyle=\ttfamily,%
commentstyle=\itshape,%
keywordstyle=\bfseries\color{CornflowerBlue},%
ndkeywordstyle=\color{red},%
language=ML,
,keywordstyle=[6]{\itshape}%
,morekeywords=[6]{args_type}%
}%
\lstnewenvironment{sml}[1][]{\lstset{style=sml,
backgroundcolor=\color{Blue!4},
frame=lines,
basicstyle=\footnotesize\ttfamily,#1}}{}
%%%
\def\inlinesml{\lstinline[style=sml,breaklines=true,mathescape,breakatwhitespace=true]}
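%%
%% Usage sketch (illustrative only; the snippets below are assumptions about
%% intended use, not part of this file). Once loaded via \usepackage{lstisadof},
%% Isar sources are typeset with the environments and macros defined above:
%%
%%   \begin{isar}
%%   definition wheel_diameter::real ("w$_d$") where "w$_d$ \<equiv> SOME x. x > 0"
%%   \end{isar}
%%
%% Inline snippets are set with \inlineisar|definition Speed|, prover output
%% with the out environment, and ML code with the sml environment:
%%
%%   \begin{sml}
%%   fun sum xs = List.foldl (op +) 0 xs
%%   \end{sml}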


@@ -1,3 +1,12 @@
%% This is a placeholder for user-specific configuration and packages.
\title{mini-odo}{}{}{}{}{}{}
\author{By brucker}{}{}{}{}{}
\usepackage{listings}
\usepackage{lstisadof}
\usepackage{wrapfig}
\usepackage{paralist}
\usepackage{numprint}
\newcommand{\fixIsarList}{\vspace{-\topsep}\vspace{-\baselineskip}\mbox{}\\[0pt]\noindent}
\newcommand{\eg}{e.\,g.}
\newcommand{\ie}{i.\,e.}
\author{}
\title{}


@@ -0,0 +1,884 @@
@STRING{pub-springer={Springer} }
@STRING{pub-springer:adr="" }
@STRING{s-lncs = "LNCS" }
@Manual{ wenzel:isabelle-isar:2017,
title = {The Isabelle/Isar Reference Manual},
author = {Makarius Wenzel},
year = 2017,
note = {Part of the Isabelle distribution.}
}
@Book{ adler:r:2010,
abstract = {Presents a guide to the R computer language, covering such
topics as the user interface, packages, syntax, objects,
functions, object-oriented programming, data sets, lattice
graphics, regression models, and bioconductor.},
added-at = {2013-01-10T22:39:38.000+0100},
address = {Sebastopol, CA},
author = {Adler, Joseph},
isbn = {9780596801700 059680170X},
keywords = {R},
publisher = {O'Reilly},
refid = 432987461,
title = {R in a nutshell},
year = 2010
}
@InCollection{ wenzel.ea:building:2007,
abstract = {We present the generic system framework of
Isabelle/Isar underlying recent versions of Isabelle. Among
other things, Isar provides an infrastructure for Isabelle
plug-ins, comprising extensible state components and
extensible syntax that can be bound to tactical ML
programs. Thus the Isabelle/Isar architecture may be
understood as an extension and refinement of the
traditional LCF approach, with explicit infrastructure for
building derivative systems. To demonstrate the technical
potential of the framework, we apply it to a concrete
formal methods tool: the HOL-Z 3.0 environment, which is
geared towards the analysis of Z specifications and formal
proof of forward-refinements.},
author = {Makarius Wenzel and Burkhart Wolff},
booktitle = {TPHOLs 2007},
editor = {Klaus Schneider and Jens Brandt},
language = {USenglish},
acknowledgement={none},
pages = {352--367},
publisher = pub-springer,
address = pub-springer:adr,
number = 4732,
series = s-lncs,
title = {Building Formal Method Tools in the {Isabelle}/{Isar}
Framework},
doi = {10.1007/978-3-540-74591-4_26},
year = 2007
}
@Misc{ w3c:ontologies:2015,
title = {Ontologies},
organisation = {W3C},
url = {https://www.w3.org/standards/semanticweb/ontology},
year = 2018
}
@Book{ boulanger:cenelec-50128:2015,
author = {Boulanger, Jean-Louis},
title = {{CENELEC} 50128 and {IEC} 62279 Standards},
publisher = {Wiley-ISTE},
year = 2015,
address = {Boston},
note = {The reference on the standard.}
}
@Booklet{ cc:cc-part3:2006,
bibkey = {cc:cc-part3:2006},
key = {Common Criteria},
institution = {Common Criteria},
language = {USenglish},
month = sep,
year = 2006,
public = {yes},
title = {Common Criteria for Information Technology Security
Evaluation (Version 3.1), {Part} 3: Security assurance
components},
note = {Available as document
\href{http://www.commoncriteriaportal.org/public/files/CCPART3V3.1R1.pdf}
{CCMB-2006-09-003}},
number = {CCMB-2006-09-003},
acknowledgement={brucker, 2007-04-24}
}
@Book{ nipkow.ea:isabelle:2002,
author = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
title = {Isabelle/HOL---A Proof Assistant for Higher-Order Logic},
publisher = pub-springer,
address = pub-springer:adr,
series = s-lncs,
volume = 2283,
doi = {10.1007/3-540-45949-9},
abstract = {This book is a self-contained introduction to interactive
proof in higher-order logic (\acs{hol}), using the proof
assistant Isabelle2002. It is a tutorial for potential
users rather than a monograph for researchers. The book has
three parts.
1. Elementary Techniques shows how to model functional
programs in higher-order logic. Early examples involve
lists and the natural numbers. Most proofs are two steps
long, consisting of induction on a chosen variable followed
by the auto tactic. But even this elementary part covers
such advanced topics as nested and mutual recursion. 2.
Logic and Sets presents a collection of lower-level tactics
that you can use to apply rules selectively. It also
describes Isabelle/\acs{hol}'s treatment of sets, functions
and relations and explains how to define sets inductively.
One of the examples concerns the theory of model checking,
and another is drawn from a classic textbook on formal
languages. 3. Advanced Material describes a variety of
other topics. Among these are the real numbers, records and
overloading. Advanced techniques are described involving
induction and recursion. A whole chapter is devoted to an
extended example: the verification of a security protocol. },
year = 2002,
acknowledgement={brucker, 2007-02-19},
bibkey = {nipkow.ea:isabelle:2002},
tags = {noTAG},
clearance = {unclassified},
timestamp = {2008-05-26}
}
@InProceedings{ wenzel:asynchronous:2014,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in
Isabelle/{PIDE}},
booktitle = {Interactive Theorem Proving (ITP)},
pages = {515--530},
year = 2014,
crossref = {klein.ea:interactive:2014},
doi = {10.1007/978-3-319-08970-6_33},
timestamp = {Sun, 21 May 2017 00:18:59 +0200},
abstract = { Historically, the LCF tradition of interactive theorem
proving was tied to the read-eval-print loop, with
sequential and synchronous evaluation of prover commands
given on the command-line. This user-interface technology
was adequate when R. Milner introduced his LCF proof
assistant in the 1970-ies, but it severely limits the
potential of current multicore hardware and advanced IDE
front-ends.
Isabelle/PIDE breaks this loop and retrofits the
read-eval-print phases into an asynchronous model of
document-oriented proof processing. Instead of feeding a
sequence of individual commands into the prover process,
the primary interface works via edits over a family of
document versions. Execution is implicit and managed by the
prover on its own account in a timeless and stateless
manner. Various aspects of interactive proof checking are
scheduled according to requirements determined by the
front-end perspective on the proof document, while making
adequate use of the CPU resources on multicore hardware on
the back-end.
Recent refinements of Isabelle/PIDE provide an explicit
concept of asynchronous print functions over existing proof
states. This allows to integrate long-running or
potentially non-terminating tools into the document-model.
Applications range from traditional proof state output
(which may consume substantial time in interactive
development) to automated provers and dis-provers that
report on existing proof document content (e.g.
Sledgehammer, Nitpick, Quickcheck in Isabelle/HOL).
Moreover, it is possible to integrate query operations via
additional GUI panels with separate input and output (e.g.
for Sledgehammer or find-theorems). Thus the Prover IDE
provides continuous proof processing, augmented by add-on
tools that help the user to continue writing proofs. }
}
@Proceedings{ klein.ea:interactive:2014,
editor = {Gerwin Klein and Ruben Gamboa},
title = {Interactive Theorem Proving - 5th International
Conference, {ITP} 2014, Held as Part of the Vienna Summer
of Logic, {VSL} 2014, Vienna, Austria, July 14-17, 2014.
Proceedings},
series = s-lncs,
volume = 8558,
publisher = pub-springer,
year = 2014,
doi = {10.1007/978-3-319-08970-6},
isbn = {978-3-319-08969-0}
}
@InProceedings{ bezzecchi.ea:making:2018,
title = {Making Agile Development Processes fit for V-style
Certification Procedures},
author = {Bezzecchi, S. and Crisafulli, P. and Pichot, C. and Wolff,
B.},
booktitle = {{ERTS'18}},
abstract = {We present a process for the development of safety and
security critical components in transportation systems
targeting a high-level certification (CENELEC 50126/50128,
DO 178, CC ISO/IEC 15408).
The process adheres to the objectives of an ``agile
development'' in terms of evolutionary flexibility and
continuous improvement. Yet, it enforces the overall
coherence of the development artifacts (ranging from proofs
over tests to code) by a particular environment (CVCE).
In particular, the validation process is built around a
formal development based on the interactive theorem proving
system Isabelle/HOL, by linking the business logic of the
application to the operating system model, down to code and
concrete hardware models thanks to a series of refinement
proofs.
We apply both the process and its support in CVCE to a
case-study that comprises a model of an odometric service
in a railway-system with its corresponding implementation
integrated in seL4 (a secure kernel for which a
comprehensive Isabelle development exists). Novel
techniques implemented in Isabelle enforce the coherence of
semi-formal and formal definitions within specific
certification processes in order to improve their
cost-effectiveness. },
pdf = {https://www.lri.fr/~wolff/papers/conf/2018erts-agile-fm.pdf},
year = 2018,
series = {ERTS Conference Proceedings},
location = {Toulouse}
}
@Misc{ owl2012,
title = {OWL 2 Web Ontology Language},
note = {\url{https://www.w3.org/TR/owl2-overview/}, Document
Overview (Second Edition)},
author = {World Wide Web Consortium}
}
@Misc{ protege,
title = {Prot{\'e}g{\'e}},
note = {\url{https://protege.stanford.edu}},
year = 2018
}
@Misc{ cognitum,
title = {Fluent Editor},
note = {\url{http://www.cognitum.eu/Semantics/FluentEditor/}},
year = 2018
}
@Misc{ neon,
title = {The NeOn Toolkit},
note = {\url{http://neon-toolkit.org}},
year = 2018
}
@Misc{ owlgred,
title = {OWLGrEd},
note = {\url{http://owlgred.lumii.lv/}},
year = 2018
}
@Misc{ rontorium,
title = {R Language Package for FLuent Editor (rOntorion)},
note = {\url{http://www.cognitum.eu/semantics/FluentEditor/rOntorionFE.aspx}},
year = 2018
}
@InProceedings{ DBLP:conf/mkm/BlanchetteHMN15,
author = {Jasmin Christian Blanchette and Maximilian P. L. Haslbeck
and Daniel Matichuk and Tobias Nipkow},
title = {Mining the Archive of Formal Proofs},
booktitle = {Intelligent Computer Mathematics - International
Conference, {CICM} 2015, Washington, DC, USA, July 13-17,
2015, Proceedings},
pages = {3--17},
year = 2015,
url = {https://doi.org/10.1007/978-3-319-20615-8\_1},
doi = {10.1007/978-3-319-20615-8\_1},
timestamp = {Fri, 02 Nov 2018 09:40:47 +0100},
biburl = {https://dblp.org/rec/bib/conf/mkm/BlanchetteHMN15},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InCollection{ brucker.ea:isabelle-ontologies:2018,
abstract = {While Isabelle is mostly known as part of Isabelle/HOL (an
interactive theorem prover), it actually provides a
framework for developing a wide spectrum of applications. A
particular strength of the Isabelle framework is the
combination of text editing, formal verification, and code
generation.\\\\Up to now, Isabelle's document preparation
system lacks a mechanism for ensuring the structure of
different document types (as, e.g., required in
certification processes) in general and, in particular,
a mechanism for linking informal and formal parts of a
document.\\\\In this paper, we present Isabelle/DOF, a
novel Document Ontology Framework on top of Isabelle.
Isabelle/DOF allows for conventional typesetting \emph{as
well} as formal development. We show how to model document
ontologies inside Isabelle/DOF, how to use the resulting
meta-information for enforcing a certain document
structure, and discuss ontology-specific IDE support.},
address = {Heidelberg},
author = {Achim D. Brucker and Idir Ait-Sadoune and Paolo Crisafulli
and Burkhart Wolff},
booktitle = {Conference on Intelligent Computer Mathematics (CICM)},
doi = {10.1007/978-3-319-96812-4_3},
keywords = {Isabelle/Isar, HOL, Ontologies},
language = {USenglish},
location = {Hagenberg, Austria},
number = 11006,
pdf = {https://www.brucker.ch/bibliography/download/2018/brucker.ea-isabelle-ontologies-2018.pdf},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {Using the {Isabelle} Ontology Framework: Linking the
Formal with the Informal},
url = {https://www.brucker.ch/bibliography/abstract/brucker.ea-isabelle-ontologies-2018},
year = 2018
}
@InCollection{ brucker.wolff:isa_def-design-impl:2019,
abstract = {DOF is a novel framework for defining ontologies and enforcing them during document
development and evolution. A major goal of DOF is the integrated development of formal
certification documents (e.g., for Common Criteria or CENELEC 50128) that require
consistency across both formal and informal arguments. To support a consistent
development of formal and informal parts of a document, we implemented Isabelle/DOF,
an implementation of DOF on top of the formal methods framework Isabelle/HOL. A
particular emphasis is put on a deep integration into Isabelle's IDE, which allows
for smooth ontology development as well as immediate ontological feedback during
the editing of a document.
In this paper, we give an in-depth presentation of the design concepts of DOF's
Ontology Definition Language (ODL) and key aspects of the technology of its
implementation. Isabelle/DOF is the first ontology language supporting
machine-checked links between the formal and informal parts in an LCF-style
interactive theorem proving environment. Sufficiently annotated, large documents
can easily be developed collaboratively, while ensuring their consistency, and the
impact of changes (in the formal and the semi-formal content) is tracked automatically.},
address = {Heidelberg},
author = {Achim D. Brucker and Burkhart Wolff},
booktitle = {International Conference on Software Engineering and Formal Methods},
keywords = {Isabelle/Isar, HOL, Ontologies, Documentation},
language = {USenglish},
location = {Oslo, Norway},
number = "to appear",
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{I}sabelle/{DOF}: {D}esign and {I}mplementation},
year = 2019
}
@InProceedings{ DBLP:conf/itp/Wenzel14,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in Isabelle/PIDE},
booktitle = {Interactive Theorem Proving (ITP)},
pages = {515--530},
year = 2014,
doi = {10.1007/978-3-319-08970-6_33},
timestamp = {Sun, 21 May 2017 00:18:59 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InProceedings{ DBLP:journals/corr/Wenzel14,
author = {Makarius Wenzel},
title = {System description: Isabelle/jEdit in 2014},
booktitle = {Proceedings Eleventh Workshop on User Interfaces for
Theorem Provers, {UITP} 2014, Vienna, Austria, 17th July
2014.},
pages = {84--94},
year = 2014,
doi = {10.4204/EPTCS.167.10},
timestamp = {Wed, 03 May 2017 14:47:58 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InProceedings{ DBLP:conf/mkm/BarrasGHRTWW13,
author = {Bruno Barras and Lourdes Del Carmen
Gonz{\'{a}}lez{-}Huesca and Hugo Herbelin and Yann
R{\'{e}}gis{-}Gianas and Enrico Tassi and Makarius Wenzel
and Burkhart Wolff},
title = {Pervasive Parallelism in Highly-Trustable Interactive
Theorem Proving Systems},
booktitle = {Intelligent Computer Mathematics - MKM, Calculemus, DML,
and Systems and Projects},
pages = {359--363},
year = 2013,
doi = {10.1007/978-3-642-39320-4_29},
timestamp = {Sun, 04 Jun 2017 10:10:26 +0200},
biburl = {https://dblp.org/rec/bib/conf/mkm/BarrasGHRTWW13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@TechReport{ bsi:50128:2014,
type = {Standard},
key = {BS EN 50128:2011},
month = apr,
year = 2014,
series = {British Standards Publication},
title = {BS EN 50128:2011: Railway applications -- Communication,
signalling and processing systems -- Software for railway
control and protection systems},
institution = {British Standards Institution (BSI)},
keywords = {CENELEC},
abstract = {This European Standard is part of a group of related
standards. The others are EN 50126-1:1999 "Railway
applications -- The specification and demonstration of
Reliability, Availability, Maintainability and Safety
(RAMS) -- Part 1: Basic requirements and generic process --
and EN 50129:2003 "Railway applications -- Communication,
signalling and processing systems -- Safety related
electronic systems for signalling". EN 50126-1 addresses
system issues on the widest scale, while EN 50129 addresses
the approval process for individual systems which can exist
within the overall railway control and protection system.
This European Standard concentrates on the methods which
need to be used in order to provide software which meets
the demands for safety integrity which are placed upon it
by these wider considerations. This European Standard
provides a set of requirements with which the development,
deployment and maintenance of any safety-related software
intended for railway control and protection applications
shall comply. It defines requirements concerning
organisational structure, the relationship between
organisations and division of responsibility involved in
the development, deployment and maintenance activities.}
}
@TechReport{ ds:50126-1:2014,
type = {Standard},
key = {DS/EN 50126-1:1999},
month = oct,
year = 2014,
series = {Dansk standard},
title = {EN 50126-1:1999: Railway applications -- The specification
and demonstration of Reliability, Availability,
Maintainability and Safety (RAMS) -- Part 1: Basic
requirements and generic process},
institution = {Danish Standards Foundation},
keywords = {CENELEC},
abstract = {This European Standard provides Railway Authorities and
the railway support industry, throughout the European
Union, with a process which will enable the implementation
of a consistent approach to the management of reliability,
availability, maintainability and safety, denoted by the
acronym RAMS. Processes for the specification and
demonstration of RAMS requirements are cornerstones of this
standard. This European Standard aims to promote a common
understanding and approach to the management of RAMS.
This European Standard can be applied systematically by a
railway authority and railway support industry,
throughout all phases of the lifecycle of a railway
application, to develop railway specific RAMS requirements
and to achieve compliance with these requirements. The
systems-level approach defined by this European Standard
facilitates assessment of the RAMS interactions between
elements of complex railway applications. This European
Standard promotes co-operation between railway authority
and railway support industry, within a variety of
procurement strategies, in the achievement of an optimal
combination of RAMS and cost for railway applications.
Adoption of this European Standard will support the
principles of the European Single Market and facilitate
European railway inter-operability. The process defined by
this European Standard assumes that railway authorities and
railway support industry have business-level policies
addressing Quality, Performance and Safety. The approach
defined in this standard is consistent with the application
of quality management requirements contained within the ISO
9000 series of International standards.}
}
@Book{ paulson:ml:1996,
author = {Lawrence C. Paulson},
title = {{ML} for the Working Programmer},
publisher = {Cambridge University Press},
year = 1996,
url = {http://www.cl.cam.ac.uk/~lp15/MLbook/pub-details.html},
acknowledgement={none}
}
@Book{ pollak:beginning:2009,
title = {Beginning Scala},
author = {David Pollak},
publisher = {Apress},
year = 2009,
isbn = {978-1-4302-1989-7}
}
@Article{ klein:operating:2009,
author = {Gerwin Klein},
title = {Operating System Verification --- An Overview},
journal = {S\={a}dhan\={a}},
publisher = pub-springer,
year = 2009,
volume = 34,
number = 1,
month = feb,
pages = {27--69},
abstract = {This paper gives a high-level introduction to the topic of
formal, interactive, machine-checked software verification
in general, and the verification of operating systems code
in particular. We survey the state of the art, the
advantages and limitations of machine-checked code proofs,
and describe two specific ongoing larger-scale verification
projects in more detail.}
}
@InProceedings{ wenzel:system:2014,
author = {Makarius Wenzel},
title = {System description: Isabelle/jEdit in 2014},
booktitle = {Workshop on User Interfaces for Theorem Provers, {UITP}},
pages = {84--94},
year = 2014,
doi = {10.4204/EPTCS.167.10},
timestamp = {Wed, 12 Sep 2018 01:05:15 +0200},
editor = {Christoph Benzm{\"{u}}ller and Bruno {Woltzenlogel Paleo}},
volume = 167
}
@InProceedings{ feliachi.ea:circus:2013,
author = {Abderrahmane Feliachi and Marie{-}Claude Gaudel and
Makarius Wenzel and Burkhart Wolff},
title = {The Circus Testing Theory Revisited in Isabelle/HOL},
booktitle = {{ICFEM}},
series = {Lecture Notes in Computer Science},
volume = 8144,
pages = {131--147},
publisher = {Springer},
year = 2013
}
@Article{ Klein2014,
author = {Gerwin Klein and June Andronick and Kevin Elphinstone and
Toby C. Murray and Thomas Sewell and Rafal Kolanski and
Gernot Heiser},
title = {Comprehensive formal verification of an {OS} microkernel},
journal = {{ACM} Trans. Comput. Syst.},
year = 2014,
volume = 32,
number = 1,
pages = {2:1--2:70},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/tocs/KleinAEMSKH14},
doi = {10.1145/2560537},
timestamp = {Tue, 03 Jan 2017 11:51:57 +0100},
url = {http://doi.acm.org/10.1145/2560537}
}
@InProceedings{ bicchierai.ea:using:2013,
author = {Bicchierai, Irene and Bucci, Giacomo and Nocentini, Carlo
and Vicario, Enrico},
editor = {Keller, Hubert B. and Pl{\"o}dereder, Erhard and Dencker,
Peter and Klenk, Herbert},
title = {Using Ontologies in the Integration of Structural,
Functional, and Process Perspectives in the Development of
Safety Critical Systems},
booktitle = {Reliable Software Technologies -- Ada-Europe 2013},
year = 2013,
publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg},
pages = {95--108},
abstract = {We present a systematic approach for the efficient
management of the data involved in the development process
of safety critical systems, illustrating how the activities
performed during the life-cycle can be integrated in a
common framework. Information needed in these activities
reflects concepts that pertain to three different
perspectives: i) structural elements of design and
implementation; ii) functional requirements and quality
attributes; iii) organization of the overall process. The
integration of these concepts may considerably improve the
trade-off between reward and effort spent in verification
and quality-driven activities.},
isbn = {978-3-642-38601-5}
}
@Article{ zhao.ea:formal:2016,
author = {Yongwang Zhao and David San{\'{a}}n and Fuyuan Zhang and
Yang Liu},
title = {Formal Specification and Analysis of Partitioning
Operating Systems by Integrating Ontology and Refinement},
journal = {{IEEE} Trans. Industrial Informatics},
volume = 12,
number = 4,
pages = {1321--1331},
year = 2016,
abstract = {Partitioning operating systems (POSs) have been widely
applied in safety-critical domains from aerospace to
automotive. In order to improve the safety and the
certification process of POSs, the ARINC 653 standard has
been developed and complied with by the mainstream POSs.
Rigorous formalization of ARINC 653 can reveal hidden
errors in this standard and provide a necessary foundation
for formal verification of POSs and ARINC 653
applications. For the purpose of reusability and efficiency, a
novel methodology by integrating ontology and refinement is
proposed to formally specify and analyze POSs in this
paper. An ontology of POSs is developed as an intermediate
model between informal descriptions of ARINC 653 and the
formal specification in Event-B. A semiautomatic
translation from the ontology and ARINC 653 into Event-B is
implemented, which leads to a complete Event-B
specification for ARINC 653 compliant POSs. During the
formal analysis, six hidden errors in ARINC 653 have been
discovered and fixed in the Event-B specification. We also
validate the existence of these errors in two open-source
POSs, i.e., XtratuM and POK. By introducing the ontology,
the degree of automatic verification of the Event-B
specification reaches a higher level.}
}
@InProceedings{ denney.ea:evidence:2013,
author = {E. {Denney} and G. {Pai}},
booktitle = {2013 IEEE International Symposium on Software Reliability
Engineering Workshops (ISSREW)},
title = {Evidence arguments for using formal methods in software
certification},
year = 2013,
pages = {375--380},
abstract = {We describe a generic approach for automatically
integrating the output generated from a formal method/tool
into a software safety assurance case, as an evidence
argument, by (a) encoding the underlying reasoning as a
safety case pattern, and (b) instantiating it using the
data produced from the method/tool. We believe this
approach not only improves the trustworthiness of the
evidence generated from a formal method/tool, by explicitly
presenting the reasoning and mechanisms underlying its
genesis, but also provides a way to gauge the suitability
of the evidence in the context of the wider assurance case.
We illustrate our work by application to a real example-an
unmanned aircraft system - where we invoke a formal code
analysis tool from its autopilot software safety case,
automatically transform the verification output into an
evidence argument, and then integrate it into the former.},
keywords = {aircraft;autonomous aerial vehicles;formal
verification;safety-critical software;evidence
arguments;formal methods;software certification;software
safety assurance case;safety case pattern;unmanned aircraft
system;formal code analysis;autopilot software safety
case;verification output;Safety;Software
safety;Cognition;Computer
architecture;Context;Encoding;Safety cases;Safety case
patterns;Formal methods;Argumentation;Software
certification},
doi = {10.1109/ISSREW.2013.6688924},
month = {Nov}
}
@InProceedings{ kaluvuri.ea:quantitative:2014,
author = {Kaluvuri, Samuel Paul and Bezzi, Michele and Roudier,
Yves},
editor = {Eckert, Claudia and Katsikas, Sokratis K. and Pernul,
G{\"u}nther},
title = {A Quantitative Analysis of Common Criteria Certification
Practice},
booktitle = {Trust, Privacy, and Security in Digital Business},
year = 2014,
publisher = {Springer International Publishing},
address = {Cham},
pages = {132--143},
abstract = {The Common Criteria (CC) certification framework defines a
widely recognized, multi-domain certification scheme that
aims to provide security assurances about IT products to
consumers. However, the CC scheme does not prescribe a
monitoring scheme for the CC practice, raising concerns
about the quality of the security assurance provided by the
certification and questions on its usefulness. In this
paper, we present a critical analysis of the CC practice
that concretely exposes the limitations of current
approaches. We also provide directions to improve the CC
practice.},
isbn = {978-3-319-09770-1}
}
@InProceedings{ ekelhart.ea:ontological:2007,
author = {Ekelhart, Andreas and Fenz, Stefan and Goluch, Gernot and
Weippl, Edgar},
editor = {Venter, Hein and Eloff, Mariki and Labuschagne, Les and
Eloff, Jan and von Solms, Rossouw},
title = {Ontological Mapping of Common Criteria's Security
Assurance Requirements},
booktitle = {New Approaches for Security, Privacy and Trust in Complex
Environments},
year = 2007,
publisher = {Springer US},
address = {Boston, MA},
pages = {85--95},
abstract = {The Common Criteria (CC) for Information Technology
Security Evaluation provides comprehensive guidelines for
the evaluation and certification of IT security regarding
data security and data privacy. Due to the very complex
and time-consuming certification process a lot of companies
abstain from a CC certification. We created the CC Ontology
tool, which is based on an ontological representation
of the CC catalog, to support the evaluator at the
certification process. Tasks such as the planning of an
evaluation process, the review of relevant documents or the
creating of reports are supported by the CC Ontology
tool. With the development of this tool we reduce the time
and costs needed to complete a certification.},
isbn = {978-0-387-72367-9}
}
@InProceedings{ fenz.ea:formalizing:2009,
author = {Fenz, Stefan and Ekelhart, Andreas},
title = {Formalizing Information Security Knowledge},
booktitle = {Proceedings of the 4th International Symposium on
Information, Computer, and Communications Security},
series = {ASIACCS '09},
year = 2009,
isbn = {978-1-60558-394-5},
location = {Sydney, Australia},
pages = {183--194},
numpages = 12,
url = {http://doi.acm.org/10.1145/1533057.1533084},
doi = {10.1145/1533057.1533084},
acmid = 1533084,
publisher = {ACM},
address = {New York, NY, USA},
keywords = {information security, risk management, security ontology},
abstract = {Unified and formal knowledge models of the information
security domain are fundamental requirements for supporting
and enhancing existing risk management approaches. This
paper describes a security ontology which provides an
ontological structure for information security domain
knowledge. Besides existing best-practice guidelines such
as the German IT Grundschutz Manual also concrete knowledge
of the considered organization is incorporated. An
evaluation conducted by an information security expert team
has shown that this knowledge model can be used to support
a broad range of information security risk management
approaches.}
}
@InProceedings{ gleirscher.ea:incremental:2007,
author = {M. {Gleirscher} and D. {Ratiu} and B. {Schatz}},
booktitle = {2007 International Conference on Systems Engineering and
Modeling},
title = {Incremental Integration of Heterogeneous Systems Views},
year = 2007,
pages = {50--59},
abstract = {To master systems complexity, their industrial development
requires specialized heterogeneous views and techniques and
- correspondingly - engineering tools. These views
generally cover only parts of the system under development,
and critical development defects often occur at the gaps
between them. To successfully achieve an integration that
bridges these gaps, we must tackle it both from the
methodical as well as from the tooling sides. The former
requires answers to questions like: What are the views
provided by the tools? How are they related and extended to
achieve consistency or to form new views? - while the
latter requires answers to: How are views extracted from
the tools? How are they composed and provided to the user?
Our approach, suitable for incremental integration, is
demonstrated in the tool integration framework ToolNet.},
keywords = {computer aided engineering;computer aided software
engineering;software tools;heterogeneous systems
views;systems complexity;tool integration
framework;ToolNet;engineering tools;Systems engineering and
theory;Certification;Integrated circuit
modeling;Bridges;Software tools;Computer aided software
engineering;Computer aided engineering;Costs;Natural
languages;Formal specifications},
doi = {10.1109/ICSEM.2007.373334},
month = {March}
}
@Booklet{ omg:sacm:2018,
bibkey = {omg:sacm:2018},
key = omg,
abstract = {This specification defines a metamodel for representing
structured assurance cases. An Assurance Case is a set of
auditable claims, arguments, and evidence created to
support the claim that a defined system/service will
satisfy the particular requirements. An Assurance Case is a
document that facilitates information exchange between
various system stakeholders such as suppliers and
acquirers, and between the operator and regulator, where
the knowledge related to the safety and security of the
system is communicated in a clear and defendable way. Each
assurance case should communicate the scope of the system,
the operational context, the claims, the safety and/or
security arguments, along with the corresponding
evidence.},
publisher = omg,
language = {USenglish},
month = mar,
keywords = {SACM},
topic = {formalism},
note = {Available as OMG document
\href{http://www.omg.org/cgi-bin/doc?formal/2018-02-02}
{formal/2018-02-02}},
public = {yes},
title = {Structured Assurance Case Metamodel (SACM)},
year = 2018
}
@InProceedings{ kelly.ea:goal:2004,
title = {The Goal Structuring Notation -- A Safety Argument
Notation},
booktitle = {Dependable Systems and Networks},
year = 2004,
month = jul,
author = {Tim Kelly and Rob Weaver}
}
@TechReport{ rushby:formal:1993,
author = {John Rushby},
title = {Formal Methods and the Certification of Critical Systems},
institution = {Computer Science Laboratory, SRI International},
year = 1993,
number = {SRI-CSL-93-7},
address = {Menlo Park, CA},
note = {Also issued under the title {\em Formal Methods and
Digital Systems Validation for Airborne Systems\/} as NASA
Contractor Report 4551, December 1993},
month = dec
}
@InProceedings{ greenaway.ea:bridging:2012,
author = {Greenaway, David and Andronick, June and Klein, Gerwin},
editor = {Beringer, Lennart and Felty, Amy},
title = {Bridging the Gap: Automatic Verified Abstraction of C},
booktitle = {Interactive Theorem Proving},
year = 2012,
publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg},
pages = {99--115},
abstract = {Before low-level imperative code can be reasoned about in
an interactive theorem prover, it must first be converted
into a logical representation in that theorem prover.
Accurate translations of such code should be conservative,
choosing safe representations over representations
convenient to reason about. This paper bridges the gap
between conservative representation and convenient
reasoning. We present a tool that automatically abstracts
low-level C semantics into higher level specifications,
while generating proofs of refinement in Isabelle/HOL for
each translation step. The aim is to generate a verified,
human-readable specification, convenient for further
reasoning.},
isbn = {978-3-642-32347-8}
}


@@ -1,31 +1,523 @@
theory mini_odo
imports "Isabelle_DOF.CENELEC_50128"
"Isabelle_DOF.scholarly_paper"
(*<*)
theory
mini_odo
imports
"Isabelle_DOF.CENELEC_50128"
"Isabelle_DOF.technical_report"
begin
declare[[strict_monitor_checking=true]]
(*>*)
section\<open> Some examples of Isabelle's standard antiquotations. \<close>
(* some show-off of standard anti-quotations: *)
text \<open>THIS IS A TEXT\<close>
title*[title::title]\<open>The CENELEC 50128 Ontology\<close>
subtitle*[subtitle::subtitle]\<open>Case Study: An Odometer-Subsystem\<close>
text\<open> @{thm refl} of name @{thm [source] refl}
@{thm[mode=Rule] conjI}
@{file "mini_odo.thy"}
@{value "3+4::int"}
@{const hd}
@{theory HOL.List}
@{term "3"}
@{type bool}
@{term [show_types] "f x = a + x"} \<close>
chapter*[casestudy::technical]\<open>A Case-Study: An Odometer-Subsystem\<close>
text\<open>
In our case study, we will follow the phases of analysis, design, and implementation of the
odometry function of a train. This software processes data from an odometer to compute the position,
speed, and acceleration of a train. This system provides the basis for many
safety critical decisions, \eg, the opening of the doors. Due to its relatively small size, it
is a manageable, albeit realistic target for a comprehensive formal development: it covers a
physical model of the environment, the physical and architectural model of the odometer including
the problem of numerical sampling, and the boundaries of efficient computations. The interplay
between environment and measuring-device as well as the implementation problems on a platform
with limited resources makes the odometer a fairly typical safety critical embedded system.
For reasons of space, we will focus on the analysis part of the integrated
document; the design and code parts will only be outlined in a final resume. The
\<^emph>\<open>ontological embedding\<close>, which represents a main contribution of this paper, will be presented
in the next two sections.
We start with the capture of a number of informal documents available at the beginning of the
development.
\<close>
section\<open>System Requirements Specification as an \<^emph>\<open>Integrated Source\<close>\<close>
text\<open>Accurate information of a train's location along a track is an important prerequisite
to safe railway operation. Position, speed, and acceleration measurement usually relies on a
set of independent measurements based on different physical principles---as a way to enhance
precision and availability. One of them is an \<^emph>\<open>odometer\<close>, which allows estimating the relative
location of the train between positions established by other measurements. \<close>
subsection\<open>Capturing ``Basic Principles of Motion and Motion Measurement.''\<close>
text\<open>
A rotary encoder measures the motion of a train. To achieve this, the encoder's shaft is fixed to
the train's wheel axle. When the train moves, the encoder produces a signal pattern directly
related to the train's progress. By measuring the fractional rotation of the encoder's shaft and
considering the wheel's effective ratio, the relative movement of the train can be calculated.
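As a back-of-the-envelope sketch anticipating the formal definitions below (the factor
$2 \cdot 3$ matches the resolution defined later, plausibly two signal edges on each of the three
sensor channels): with $tpw$ teeth per wheel turn and wheel circumference
$w_{\text{circ}} = \pi \cdot w_d$, $n$ observed encoder increments correspond to the relative
distance
\[ \Delta s \;\approx\; n \cdot \frac{w_{\text{circ}}}{2 \cdot 3 \cdot tpw}. \]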
\begin{wrapfigure}[8]{l}{3.9cm}
\centering
\vspace{-.7cm}
\includegraphics[width=3.4cm]{figures/wheel-df}
\caption{Motion sensing via an odometer.}
\label{wheel-df}
\end{wrapfigure}
\autoref{wheel-df} shows that we model a train, seen from a pure kinematics standpoint, as a physical
system characterized by a one-dimensional continuous distance function, which represents the
observable of the physical system. Concepts like speed and acceleration are derived concepts,
defined as its (gradient) derivatives. We assume the use of the meter, kilogram, and second
(MKS) system.
This model is already based on several fundamental assumptions relevant for the correct
functioning of the system and for its integration into the system as a whole. In
particular, we need to make the following assumptions explicit:\vspace{-.3cm}
\<^item> that the wheel is perfectly circular with a given, constant radius,
\<^item> that the slip between the train's wheel and the track is negligible,
\<^item> that the distance between all teeth of a wheel is the same and constant, and
\<^item> that the sampling rate of positions is a given constant.
These assumptions have to be traced throughout the certification process as
\<^emph>\<open>derived requirements\<close> (or, in CENELEC terminology, as \<^emph>\<open>exported constraints\<close>), which is
also reflected by their tracing throughout the body of certification documents. This may result
in operational regulations, \eg, regular checks for tolerable wheel defects. As for the
\<^emph>\<open>no slip\<close>-assumption, this leads to the modeling of constraints under which physical
slip can be neglected: the device can only produce reliable results under certain physical
constraints (speed and acceleration limits). Moreover, the \<^emph>\<open>no slip\<close>-assumption motivates
architectural arrangements for situations where this assumption cannot be assured (as is the
case, for example, of an emergency braking) together with error-detection and error-recovery.
\<close>
subsection\<open>Capturing ``System Architecture.''\<close>
text\<open>
\begin{figure}
\centering
\includegraphics[width=.70\textwidth]{figures/three-phase-odo}
\begin{picture}(0,0)
\put(-112,44){\includegraphics[width=.30\textwidth]{figures/odometer}}
\end{picture}
\caption{An odometer with three sensors \inlineisar{C1}, \inlineisar{C2}, and \inlineisar{C3}.}\label{three-phase}
\end{figure}
The requirements analysis also contains a sub-document \<^emph>\<open>system architecture description\<close>
(CENELEC notion) that contains a technical drawing of the odometer, a timing diagram
(see \autoref{three-phase}), and tables describing the encoding of the position
for the possible signal transitions of the sensors \inlineisar{C1}, \inlineisar{C2}, and \inlineisar{C3}.
\<close>
subsection\<open>Capturing ``System Interfaces.''\<close>
text\<open>
The requirements analysis also contains a sub-document \<^emph>\<open>functions and interfaces\<close>
(CENELEC notion) describing the technical format of the output of the odometry function.
This section, \eg, specifies the output \<^emph>\<open>speed\<close> as given by a \<^verbatim>\<open>int_32\<close> to be the
``Estimation of the speed (in mm/sec) evaluated over the latest $N_{\text{avg}}$ samples''
where the speed refers to the physical speed of the train and $N_{\text{avg}}$ is a parameter of the
sub-system configuration. \<close>
(*<*)
declare_reference*["df-numerics-encshaft"::figure]
(*>*)
subsection\<open>Capturing ``Required Performances.''\<close>
text\<open>
The given analysis document is relatively implicit on the expected precision of the measurements;
however, certain interface parameters like \inlineisar*Odometric_Position_TimeStamp*
(a counter on the number of samplings) and \inlineisar*Relative_Position* are defined as
unsigned 32-bit integers. These definitions imply exported constraints concerning the acceptable
time of service as well as the maximum distance before a necessary reboot of the subsystem.
For our case study, we assume a maximum deviation of the \inlineisar*Relative_Position* from the
theoretical distance.
The requirement analysis document describes the physical environment, the architecture
of the measuring device, and the required format and precision of the measurements of the odometry
function as represented in @{figure (unchecked) "df-numerics-encshaft"}.\<close>
figure*["df-numerics-encshaft"::figure,relative_width="76",src="''figures/df-numerics-encshaft''"]
\<open>Real distance vs. discrete distance vs. shaft-encoder sequence\<close>
subsection\<open>Capturing the ``Software Design Spec'' (Resume).\<close>
text\<open>
\enlargethispage{\baselineskip}
The design provides a function that manages an internal first-in-first-out buffer of
shaft-encodings and corresponding positions. Central to the design is a step-function that analyzes
new incoming shaft encodings, checks them, propagates two kinds of error-states (one allowing
recovery, the other, fatal, signaling, \eg, a defect of the receiver hardware),
and calculates the relative position, speed, and acceleration.
\<close>
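text\<open>A plausible rendering of this design as HOL signatures (an illustrative sketch; the
names below are our assumptions, since the actual design model is not part of this example):
\begin{isar}
datatype odo_error = recoverable | fatal
record odo_state =
  buffer :: "shaft_encoder_state list"
  pos :: real   spd :: real   acc :: real
consts odo_step :: "shaft_encoder_state \<Rightarrow> odo_state \<Rightarrow> odo_state \<times> odo_error option"
\end{isar}\<close>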
subsection\<open>Capturing the ``Software Implementation'' (Resume).\<close>
text\<open>
While the design is executable on a Linux system, it turns out that the code generated from the
Isabelle model is neither executable on the resource-constrained target platform, an ARM-based
Sabre-light card, nor certifiable, since the compilation chain via ML to C implies the
inclusion of a run-time system and quite complex libraries.
We therefore adopted an approach similar to the one used in the seL4 project~@{cite "Klein2014"}: we use a
hand-written implementation in C and verify it via
AutoCorres~@{cite "greenaway.ea:bridging:2012"} against
the design model. The hand-written C source is integrated into Isabelle/HOL technically by
registering it in the build-configuration and logically by a trusted C-to-HOL compiler included
in AutoCorres.
\<close>
section\<open>Formal Enrichment of the Software Requirements Specification\<close>
text\<open>
After the \<^emph>\<open>capture\<close>-phase, where we converted/integrated existing informal analysis and design
documents as well as code into an integrated Isabelle document, we entered the phase of
\<open>formal enrichment\<close>. For example, from the assumptions in the architecture follow
the definitions:
\begin{isar}
definition teeth_per_wheelturn::nat ("tpw") where "tpw \<equiv> SOME x. x > 0"
definition wheel_diameter::real ("w$_d$") where "w$_d$ \<equiv> SOME x. x > 0"
definition wheel_circumference::real ("w$_{\text{circ}}$") where "w$_{\text{circ}}$ \<equiv> pi * w$_d$"
definition \<delta>s$_{\text{res}}$::real where "\<delta>s$_{\text{res}}$ \<equiv> w$_{\text{circ}}$ / (2 * 3 * tpw)"
\end{isar}
Here, \inlineisar{real} refers to the real numbers as defined in the HOL-Analysis
library, which provides concepts such as Cauchy Sequences, limits,
differentiability, and a very substantial part of classical Calculus. \inlineisar{SOME} is the
Hilbert choice operator from HOL; the definitions of the model parameters admit all possible positive values as uninterpreted
constants. Our perfect-wheel assumption is translated into a calculation of the circumference of the
wheel, while \inlineisar{\<delta>s<bsub>res<esub>}, the resolution of the odometer, can be calculated
from these parameters. HOL-Analysis allows us to formalize the fundamental physical observables:
\begin{isar}
type_synonym distance_function = "real\<Rightarrow>real"
definition Speed::"distance_function\<Rightarrow>real\<Rightarrow>real" where "Speed f \<equiv> deriv f"
definition Accel::"distance_function\<Rightarrow>real\<Rightarrow>real"
where "Accel f \<equiv> deriv (deriv f)"
\end{isar}
which allows us to constrain the central observable \inlineisar|distance_function| in such a
way that it describes the space of ``normal behavior'' where we expect the odometer to produce
reliable measurements over a \inlineisar|distance_function df|.
The essence of the physics of the train is covered by the following definition:
\begin{isar}
definition normally_behaved_distance_function :: "(real \<Rightarrow> real) \<Rightarrow> bool"
where normally_behaved_distance_function df =
( \<forall> t. df(t) \<in> \<real>$_{\ge 0}$ \<and> (\<forall> t \<in> \<real>$_{\le 0}$. df(t) = 0)
\<and> df differentiable on$_{\text{R}}$ \<and> (Speed df)differentiable on$_{\text{R}}$
\<and> (Accel df)differentiable on$_{\ensuremath{R}}$
\<and> (\<forall> t. (Speed df) t \<in> {-Speed$_{\text{Max}}$ .. Speed$_{\text{Max}}$})
\<and> (\<forall> t. (Accel df) t \<in> {-\<bar>Accel$_{\text{Max}}$\<bar> .. \<bar>Accel$_{\text{Max}}$\<bar>}))
\end{isar}
which constrains the distance functions to the bounds given in the informal descriptions and
states them to be three-fold differentiable functions within certain bounds concerning speed and acceleration.
Note that violations, in particular of the constraints on speed and acceleration, \<^emph>\<open>do\<close> occur in practice.
In such cases, the global system adapts recovery strategies that are out of the scope of our model.
Concepts like \inlineisar+shaft_encoder_state+ (a triple with the sensor values
\inlineisar{C1}, \inlineisar{C2}, \inlineisar{C3}) were formalized as types, while tables were defined as recursive functions:
\enlargethispage{2\baselineskip}\begin{isar}
fun phase$_0$ :: "nat \<Rightarrow> shaft_encoder_state" where
"phase$_0$ (0) = \<lparr> C1 = False, C2 = False, C3 = True \<rparr>"
|"phase$_0$ (1) = \<lparr> C1 = True, C2 = False, C3 = True \<rparr>"
|"phase$_0$ (2) = \<lparr> C1 = True, C2 = False, C3 = False\<rparr>"
|"phase$_0$ (3) = \<lparr> C1 = True, C2 = True, C3 = False\<rparr>"
|"phase$_0$ (4) = \<lparr> C1 = False, C2 = True, C3 = False\<rparr>"
|"phase$_0$ (5) = \<lparr> C1 = False, C2 = True, C3 = True \<rparr>"
|"phase$_0$ x = phase$_0$(x - 6)"
definition Phase ::"nat\<Rightarrow>shaft_encoder_state" where Phase(x) = phase$_0$(x-1)
\end{isar}
We now define shaft encoder sequences as
translations of distance functions:
\begin{isar}
definition encoding::"distance_function\<Rightarrow>nat\<Rightarrow>real\<Rightarrow>shaft_encoder_state"
where "encoding df init$_{\text{pos}}$ \<equiv> \<lambda>x. Phase(nat\<lfloor>df(x) / \<delta>s$_{\text{res}}$\<rfloor> + init$_{\text{pos}}$)"
\end{isar}
where \inlineisar+init$_{\text{pos}}$+ is the initial position of the wheel.
\inlineisar+sampling+s were constructed from encoding sequences over discretized time points:
\begin{isar}
definition $\!\!$sampling::"distance$\!$_function\<Rightarrow>nat\<Rightarrow>real\<Rightarrow>nat\<Rightarrow>shaft$\!$_encoder$\!$_state"
where "sampling df init$_{\text{pos}}$ \<delta>t \<equiv> \<lambda>n::nat. encoding df init$_{\text{pos}}$ (n * \<delta>t)"
\end{isar}
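Unfolding both definitions shows that the \inlineisar+n+-th sample is just the phase at the
discretized position; a sketch (the proof is mere definition unfolding):
\begin{isar}
lemma sampling_unfold:
  "sampling df init$_{\text{pos}}$ \<delta>t n = Phase(nat\<lfloor>df(n * \<delta>t) / \<delta>s$_{\text{res}}$\<rfloor> + init$_{\text{pos}}$)"
  by (simp add: sampling_def encoding_def)
\end{isar}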
The sampling interval \inlineisar+\<delta>t+ (the inverse of the sampling frequency) is a critical
configuration parameter of the system.
Finally, we can formally define the required performances. From the interface description
and the global model parameters such as wheel diameter, the number of teeth per wheel, and the
sampling frequency, we can infer the maximal time of service as well as the maximum distance the
device can measure.
As an example configuration, choosing 1m for
\inlineisar+w$_d$+, 100 for \inlineisar+tpw+, 80km/h for \inlineisar+Speed$_{\text{Max}}$+,
and 14400Hz for the sampling frequency results in an odometer resolution of 2.3mm,
a maximum distance of 9878km, and a maximal system up-time of 123.4 hours (assuming a 32-bit
position counter: $2.3\text{mm} \cdot 2^{32} \approx 9878$km, and $9878\text{km} / 80\text{km/h} \approx 123.5$h).
The required precision of an odometer can be defined by a constant describing
the maximally allowed difference between \inlineisar+df(n*\<delta>t)+ and
\inlineisar+sampling df init$_{\text{pos}}$ \<delta>t n+ for all \inlineisar+init$_{\text{pos}}$ \<in>{0..5}+.
\<close>
(*<*)
ML\<open>(* plausibility check of the example configuration: a 32-bit position
       counter at a resolution of 2.3mm yields the maximal measurable
       distance (in km) and, at 80km/h, the maximal up-time (in hours) *)
    val two_pow_32 = 1024 * 1024 * 1024 * 4;              (* 2^32 counter values *)
    val dist_max   = 0.0023 * (real two_pow_32) / 1000.0; (* ~ 9878.4 km *)
    val dist_h     = dist_max / 80.0                      (* ~ 123.5 h *)\<close>
(*>*)
section*[verific::technical]\<open>Verification of the Software Requirements Specification\<close>
text\<open>The original documents already contained various statements that motivate certain safety
properties of the device. For example, the \inlineisar+Phase+-table excludes situations in which
the sensors \inlineisar{C1}, \inlineisar{C2}, and \inlineisar{C3} are all ``off'' as well as
situations in which they are all ``on,'' both of which would reflect a physical or electrical error
in the odometer. A very small Isabelle case-distinction proof shows that this safety requirement
indeed follows from the above definitions:
\begin{isar}
lemma Encoder_Property_1:(C1(Phase x) \<and> C2(Phase x) \<and> C3(Phase x))=False
proof (cases x)
case 0 then show ?thesis by (simp add: Phase_def)
next
case (Suc n) then show ?thesis
by(simp add: Phase_def,rule_tac n = n in cycle_case_split,simp_all)
qed
\end{isar}
for all positions \inlineisar+x+. Similarly, it is proved that the table is indeed
cyclic: \inlineisar+ phase$_0$ x = phase$_0$(x mod 6)+ and locally injective:
\inlineisar+\<forall>x<6. \<forall>y<6. phase$_0$ x = phase$_0$ y \<longrightarrow> x = y+.
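A plausible proof script for the cyclicity lemma is the following sketch; the actual script in
the development may differ:
\begin{isar}
lemma phase$_0$_cycle: "phase$_0$ x = phase$_0$ (x mod 6)"
  by (induct x rule: phase$_0$.induct) simp_all
\end{isar}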
These lemmas, building the ``theory of an odometer,'' culminate in a theorem
that we would like to present in more detail.
\begin{isar}
theorem minimal_sampling :
assumes * : normally_behaved_distance_function df
and ** : \<delta>t * Speed$_{\text{Max}}$ < \<delta>s$_{\text{res}}$
shows \<forall> \<delta>X\<le>\<delta>t. 0<\<delta>X \<longrightarrow>
\<exists>f. retracting (f::nat\<Rightarrow>nat) \<and>
sampling df init$_{\text{pos}}$ \<delta>X = (sampling df init$_{\text{pos}}$ \<delta>t) o f
\end{isar}
This theorem states for \inlineisar+normally_behaved_distance_function+s that there is
a minimal sampling frequency which assures the safety of the measurements: samplings of
some \inlineisar$df$ taken at higher sampling frequencies can be ``pumped up'' from the
sampling at this minimal frequency by a mere re-indexing; they do not contain more information.
Of particular interest is the second assumption, labelled ``\inlineisar|**|,'' which
establishes a lower bound for the sampling frequency derived from \inlineisar+w$_{\text{circ}}$+,
\inlineisar+tpw+, and \inlineisar+Speed$_{\text{Max}}$+. Methodologically, this represents
an exported constraint that cannot be represented \<^emph>\<open>inside\<close> the design model: it means that the
computations have to be fast enough on the computing platform in order to assure that the
calculations are valid. It was in particular this exported constraint that forced us to give up
the original plan to generate the code from the design model and to execute it directly on the
target platform.
For our example configuration (1m diameter, 100 teeth per wheel, 80km/h maximal speed), this theorem
justifies that 14.4kHz is indeed enough to assure valid samplings: at 80km/h $\approx$ 22.2m/s, the
train moves about 1.5mm per sampling period of 1/14400s, which is below the resolution of 2.3mm.
Such properties are called ``internal consistency of the software requirements specification''
in the CENELEC standard~@{cite "bsi:50128:2014"}, 7.2.4.22, and are usually addressed in a report
of their own.
\<close>
chapter*[ontomodeling::text_section]\<open>The CENELEC 50128 Ontology\<close>
text\<open>
Modeling an ontology from a semi-formal text such as~@{cite"bsi:50128:2014"} is,
like any other modeling activity, not a simple one-to-one translation of some
concepts into some formalism. Rather, implicit and self-understood principles
have to be made explicit, abstractions have to be made, and decisions about
the kind of desirable user interaction may have an influence, similar to
design decisions being influenced by the strengths or weaknesses of a programming language.
\<close>
section*[lhf::text_section]
\<open>Tracking Concepts and Definitions\<close>
text\<open>
\isadof is designed to annotate text elements with structured meta-information and to reference
these text elements throughout the integrated source. A classical application of this capability
is the annotation of concepts and terms definitions---be them informal, semi-formal or formal---and
their consistent referencing. In the context of our CENELEC ontology, \eg, we can translate the
third chapter of @{cite "bsi:50128:2014"} ``Terms, Definitions and Abbreviations'' directly
into our Ontology Definition Language (ODL). Picking one example out of 49, consider the definition
of the concept ``traceability'' in paragraphs 3.1.46 (a notion referenced 31 times in the standard),
which we translated directly into:
\begin{isar}
Definition*[traceability::concept]<open> degree to which relationship
can be established between two or more products of a development
process, especially those having a predecessor/successor or
master/subordinate relationship to one another. <close>
\end{isar}
In the integrated source of the odometry study, we can reference this concept in a text element
as follows:
\begin{isar}
text*[...]<open> ... to assure <@>{concept traceability} for
<@>{requirement bitwiseAND}, we prove ... <close>
\end{isar}
The presentation of this document element inside \isadof is immediately hyperlinked against the
\inlineisar+Definition*+ element shown above; this serves as documentation of
the standard for the development team working on the integrated source. The PDF presentation
of such links depends on the actual configuration of the document generation; we will explain
this later.
CENELEC also foresees a number of roles, phases, safety integrity levels, etc., which we
translated directly into HOL enumeration types usable in ontological concepts of ODL.
\begin{isar}
datatype role =
PM (* Program Manager *) | RQM (* Requirements Manager *)
| DES (* Designer *) | IMP (* Implementer *)
| VER (* Verifier *) | VAL (* Validator *) | ...
datatype phase =
SYSDEV_ext (* System Development *) | SPl (* Software Planning *)
| SR (* Software Requirement *) | SA (* Software Architecture *)
| SDES (* Software Design *) | ...
\end{isar}
Similarly, we can formalize Table A.5 (``Verification and Testing'') of @{cite "bsi:50128:2014"},
a classification of \<^emph>\<open>verification and testing techniques\<close>:
\begin{isar}
datatype vnt_technique =
formal_proof "thm list" | stat_analysis
| dyn_analysis dyn_ana_kind | ...
\end{isar}
In contrast to the standard, we can parameterize \inlineisar+formal_proof+ with a list of
theorems, an entity known to the Isabelle kernel. For text elements annotated with theorem
names, \isadof thus assures that they indeed refer to established theorems in the Isabelle
environment. Additional checks could be added to make sure that these theorems have a particular
form.
While we claim that this possibility to link to theorems (and test results) is unique among
systems attempting to assure traceability, referencing a particular (proven) theorem is
definitely not sufficient to satisfy the claimed requirement. Human evaluators will always have
to check that the provided theorem \<open>adequately\<close> represents the claim; we do not in the slightest
suggest that their work is superfluous. Our framework allows checking statically that tests or proofs
have been provided at the places where the ontology requires them, and both assessors and developers
can rely on this check and navigate through the related information easily. It does not guarantee that
the intended concepts for, \eg, safety or security have been adequately modeled.
\<close>
section*[moe::text_section]
\<open>Major Ontological Entities: Requirements and Evidence\<close>
text\<open>
We introduce the central concept of a \<^emph>\<open>requirement\<close> as an ODL \inlineisar*doc_class*,
based on a generic library class \inlineisar*text_element* providing basic layout attributes.
\begin{isar}
doc_class requirement = text_element +
long_name :: "string option"
is_concerned :: "role set"
\end{isar}
where the \inlineisar*role*s are exactly the ones defined in the previous section and represent
the groups of stakeholders in the CENELEC process. The \inlineisar+is_concerned+-attribute therefore
allows expressing who ``owns'' this text element. \isadof supports a role-based
presentation; \eg, different presentation styles of the
integrated source may decide to highlight, omit, or defer into an annex text entities
according to their role set.
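For illustration, a hypothetical instance of this class could be declared as follows (the
instance name and attribute values are our invention, not part of the actual development):
\begin{isar}
text*[req_enc::requirement, long_name="Some ''encoder consistency''",
      is_concerned="{RQM, VER}"]
<open> The shaft-encoder sensors C1, C2 and C3 must never be all equal. <close>
\end{isar}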
Since ODL supports single inheritance, we can express sub-requirements and therefore a style
of requirement decomposition as advocated in GSN~@{cite "kelly.ea:goal:2004"}:
\begin{isar}
doc_class sub_requirement =
decomposes :: "requirement"
relates_to :: "requirement set"
\end{isar}\<close>
section*[claimsreqevidence::text_section]\<open>Tracking Claims, Derived Requirements and Evidence\<close>
text\<open>As an example of making implicit principles explicit,
consider the following statement from @{cite "bsi:50128:2014"}, p. 25:\vspace{-1.5mm}
\begin{quote}\small
The objective of software verification is to examine and arrive at a judgment based on
evidence that output items (process, documentation, software or application) of a specific
development phase fulfill the requirements and plans with respect to completeness, correctness
and consistency.
\end{quote}\vspace{-1.5mm}
The terms \<^emph>\<open>judgment\<close> and \<^emph>\<open>evidence\<close> are used as a kind of leitmotif throughout the CENELEC
standard, but they are neither explained nor even listed in the general glossary. However, the
standard is fairly explicit about the \<^emph>\<open>phase\<close>s and the organizational roles that different stakeholders
should have in the process. We express this key concept of judgment, \eg, by
the following concept:
\begin{isar}
doc_class judgement =
refers_to :: requirement
evidence :: "vnt_technique list"
status :: status
is_concerned :: "role set" <= "{VER,ASR,VAL}"
\end{isar}
As one can see, the role set defaults to the verification team, the assessors, and the
validation team.
Different views are possible here: an alternative would be to define \inlineisar+evidence+
as an ontological concept of its own carrying the \inlineisar+vnt_technique+'s (rather than as an
attribute of judgement), and to consider the basis of judgments as a relation between requirements
and evidence:
\begin{isar}
doc_class judgement =
based_on :: "(requirement \<times> evidence) set"
status :: status
is_concerned :: "role set" <= "{VER,ASR,VAL}"
\end{isar}
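In this variant, \inlineisar+evidence+ itself would become an ontological concept, along the
lines of the following sketch (the attribute names are our assumption):
\begin{isar}
doc_class evidence = text_element +
   uses   :: "vnt_technique"
   status :: status
\end{isar}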
More experimentation will be needed to find out what kind of ontological modeling is most
adequate for developers in the context of \isadof.
\<close>
section*[ontocontrol::text_section]\<open>Ontological Compliance\<close>
text\<open>From the variety of different possibilities for adding CENELEC annotations to the
integrated source, we will, in the following, point out three scenarios.\<close>
subsection\<open>Internal Verification of Claims in the Requirements Specification.\<close>
text\<open>In our case, the SR team detected early on a property necessary
for the error detection of the device (cf. @{docitem verific}):
\enlargethispage{2\baselineskip}\begin{isar}
text*[encoder_props::requirement]<open> The requirement specification team ...
C1 & C2 & C3 = 0 (bitwise logical AND operation)
C1 | C2 | C3 = 1 (bitwise logical OR operation) <close>
\end{isar}
After the Isabelle proofs shown in @{docitem verific}, we can register the theorems
directly in an evidence statement:
\begin{isar}
text*[J1::judgement, refers_to="<@>{docitem <open>encoder_props<close>}",
evidence="[formal_proof[<@>{thm <open>Encoder_Property_1<close>},
<@>{thm <open>Encoder_Property_2<close>}]]"]
<open>The required encoder properties are in fact verified to be consistent
with the formalization of <@>{term "phase$_0$"}.<close>
\end{isar}
The references \inlineisar|<@>{...}|, called antiquotations, not only allow us to refer to
formal concepts; they are also checked for consistency, and some antiquotations even
print the formally checked content (\eg, the statement of a theorem).
\<close>
subsection\<open>Exporting Claims of the Requirements Specification.\<close>
text\<open>By definition, the main purpose of the requirement specification is the
identification of the safety requirements. As an example, we state the required precision of an
odometric function: for any normally behaved distance function \inlineisar+df+, and any representable
and valid sampling sequence that can be constructed for \inlineisar+df+, we require that the
difference between the physical distance and the distance calculable from the
@{term Odometric_Position_Count} is bounded by the minimal resolution of the odometer.
\begin{isar}
text*[R5::safety_requirement]<open>We can now state ... <close>
definition
Odometric_Position_Count_precise::(shaft_encoder_state list\<Rightarrow>output)\<Rightarrow>bool
where Odometric_Position_Count_precise odofunction \<equiv>
(\<forall> df. \<forall>S. normally_behaved_distance_function df
\<longrightarrow> representable S
\<longrightarrow> valid_sampling S df
\<longrightarrow> (let pos = uint(Odometric_Position_Count(odofunction S))
in \<bar>df((length S - 1)*\<delta>t$_{\text{odo}}$) - (\<delta>s$_{\text{res}}$ * pos)\<bar> \<le> \<delta>s$_{\text{res}}$))
update_instance*[R5::safety_requirement,
formal_definition:="[<@>{thm <open>Odometric_Position_Count_precise_def<close>}]"]
\end{isar}
By \inlineisar+update_instance*+, we book the property \inlineisar+Position_Count_precise_def+ as
\inlineisar+safety_requirement+, a specific sub-class of \inlineisar+requirement+s
requesting a formal definition in Isabelle.\<close>
subsection\<open>Exporting Derived Requirements.\<close>
text\<open>Finally, we discuss the situation where the verification team discovered a critical side
condition for a major theorem necessary for the safety requirements; in our development, this was
the case for the condition labelled ``\inlineisar|**|'' in @{docitem verific}. The current CENELEC
standard clearly separates ``requirement specifications'' from ``verification reports,'' which is
probably motivated by the overall concern of organizational separation and of document consistency.
While this document organization is possible in \isadof, it is in our experience often
counter-productive in practice: organizations tend to defend their documents because the impact of
changes becomes more and more difficult to oversee. This effect results in a dramatic development
slow-down and an increase in costs. Furthermore, these barriers exclude situations where developers
know invariants perfectly well, but cannot communicate them to the verification team because the
precise formalization is not known in time. Rather than advocating document separation, we tend to
integrate these documents, keep proofs as close as possible to definitions, and plead for rigorous
version control of the integrated source, together with the proposed methods to strengthen the links
between the informal and formal parts by antiquotations and continuous ontological checking. Instead
of separating the documents, we would rather emphasize the \<^emph>\<open>separation of the views\<close> of the
different document representations. Such views can be systematically generated out of the integrated
source as different PDF versions, and for each version, document-specific consistency guarantees can
be automatically enforced.
In our case study, we define this condition as a predicate, declare an explanation of it as a
\inlineisar+SRAC+ (CENELEC terminology for a safety-related application condition; ontologically,
this is a class derived from \inlineisar+requirement+), and add the definition of the predicate
to the document instance as described in the previous section.\<close>
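text\<open>A sketch of such a declaration could look as follows; the instance name and wording are
our assumption, not the actual development:
\begin{isar}
text*[srac_sampling::SRAC]<open> The target platform must sample the shaft
encoders fast enough to establish \<delta>t * Speed$_{\text{Max}}$ < \<delta>s$_{\text{res}}$. <close>
\end{isar}\<close>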
section\<open> Core Examples for stating text-elements as doc-items.\<close>
text\<open>An "anonymous" text-item, automatically coerced into the top-class "text".\<close>
text*[tralala] \<open> Brexit means Brexit \<close>
text\<open>\appendix\<close>
chapter\<open>Appendix\<close>
text\<open>
\<^item> \inlineisar|<@>{thm refl}|: @{thm refl}
\<^item> \inlineisar|<@>{thm [source] refl}|: @{thm [source] refl}
\<^item> \inlineisar|<@>{thm[mode=Rule] conjI}|: @{thm[mode=Rule] conjI}
\<^item> \inlineisar|<@>{file "mini_odo.thy"}|: @{file "mini_odo.thy"}
\<^item> \inlineisar|<@>{value "3+4::int"}|: @{value "3+4::int"}
\<^item> \inlineisar|<@>{const hd}|: @{const hd}
\<^item> \inlineisar|<@>{theory HOL.List}|: @{theory HOL.List}
\<^item> \inlineisar|<@>{term "3"}|: @{term "3"}
\<^item> \inlineisar|<@>{type bool}|: @{type bool}
\<^item> \inlineisar|<@>{term [show_types] "f x = a + x"}|: @{term [show_types] "f x = a + x"}
\<close>
text\<open>Examples for declaration of typed doc-items "assumption" and "hypothesis",
concepts defined in the underlying ontology @{theory "Isabelle_DOF.CENELEC_50128"}. \<close>
@@ -58,8 +550,6 @@ text \<open> represent a justification of the safety related applicability
condition @{SRAC \<open>ass122\<close>} aka exported constraint @{EC \<open>ass122\<close>}.\<close>
section\<open> Some Tests for Ontology Framework and its CENELEC Instance \<close>
declare_reference* [lalala::requirement, alpha="main", beta=42]
@@ -77,14 +567,10 @@ text\<open> @{docitem \<open>lalala\<close>} -- produces warning. \<close>
text\<open> @{docitem (unchecked) \<open>lalala\<close>} -- produces no warning. \<close>
(*>*)
text\<open> @{docitem \<open>ass122\<close>} -- global reference to a text-item in another file. \<close>
text\<open> @{EC \<open>ass122\<close>} -- global reference to an exported constraint in another file.
Note that the link is actually an SRAC, which, according to
the ontology, happens to be an "ec". \<close>
end