Removed all parts that will not be submitted to the AFP.

This commit is contained in:
Achim D. Brucker 2024-04-26 02:35:00 +01:00
parent 55e42142fa
commit cd58f6b056
152 changed files with 0 additions and 35384 deletions

CHANGELOG.md

@@ -1,51 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
### Changed
- Updated Isabelle version to Isabelle 2023
## [1.3.0] - 2022-07-08
### Changed
- The project-specific configuration is now part of the `ROOT` file; the formerly
  used `isadof.cfg` is obsolete and no longer supported.
- Removed explicit use of `document/build` script. Requires removing the `build` script
entry from ROOT files.
- Isabelle/DOF is now a proper Isabelle component that should be installed using the
`isabelle components` command. The installation script is now only a convenient way
of installing the required AFP entries.
- `mkroot_DOF` has been renamed to `dof_mkroot` (and reimplemented in Scala).
## [1.2.0] - 2022-03-26
## [1.1.0] - 2021-03-20
### Added
- New antiquotations, consistency checks
### Changed
- Updated manual
- Restructured setup for ontologies (Isabelle theories and LaTeX styles)
## 1.0.0 - 2018-08-18
### Added
- First public release
[Unreleased]: https://git.logicalhacking.com/Isabelle_DOF/Isabelle_DOF/compare/v1.3.0/Isabelle2021...HEAD
[1.3.0]: https://git.logicalhacking.com/Isabelle_DOF/Isabelle_DOF/compare/v1.2.0/Isabelle2021...v1.3.0/Isabelle2021-1
[1.2.0]: https://git.logicalhacking.com/Isabelle_DOF/Isabelle_DOF/compare/v1.1.0/Isabelle2021...v1.2.0/Isabelle2021
[1.1.0]: https://git.logicalhacking.com/Isabelle_DOF/Isabelle_DOF/compare/v1.0.0/Isabelle2019...v1.1.0/Isabelle2021

CITATION

@@ -1,40 +0,0 @@
To cite Isabelle/DOF in publications, please use
Achim D. Brucker, Idir Ait-Sadoune, Paolo Crisafulli, and Burkhart
Wolff. Using The Isabelle Ontology Framework: Linking the Formal
with the Informal. In Conference on Intelligent Computer Mathematics
(CICM). Lecture Notes in Computer Science, Springer-Verlag, 2018.
A BibTeX entry for LaTeX users is
@InCollection{ brucker.ea:isabelle-ontologies:2018,
abstract = {While Isabelle is mostly known as part of Isabelle/HOL (an
interactive theorem prover), it actually provides a framework
for developing a wide spectrum of applications. A particular
strength of the Isabelle framework is the combination of text
editing, formal verification, and code generation.\\\\
Up to now, Isabelle's document preparation system lacks a mechanism
for ensuring the structure of different document types (as, e.g.,
required in certification processes) in general and, in particular,
a mechanism for linking informal and formal parts of a document.\\\\
In this paper, we present Isabelle/DOF, a novel Document Ontology
Framework on top of Isabelle. Isabelle/DOF allows for conventional
typesetting \emph{as well} as formal development. We show how to model
document ontologies inside Isabelle/DOF, how to use the resulting
meta-information for enforcing a certain document structure, and discuss
ontology-specific IDE support.},
address = {Heidelberg},
author = {Achim D. Brucker and Idir Ait-Sadoune and Paolo Crisafulli and Burkhart Wolff},
booktitle = {Conference on Intelligent Computer Mathematics (CICM)},
doi = {10.1007/978-3-319-96812-4_3},
keywords = {Isabelle/Isar, HOL, Ontologies},
language = {USenglish},
location = {Hagenberg, Austria},
number = {11006},
pdf = {https://www.brucker.ch/bibliography/download/2018/brucker.ea-isabelle-ontologies-2018.pdf},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {Using the {Isabelle} Ontology Framework: Linking the Formal with the Informal},
url = {https://www.brucker.ch/bibliography/abstract/brucker.ea-isabelle-ontologies-2018},
year = {2018},
}

IsaDofApplications.thy

@@ -1,787 +0,0 @@
(*************************************************************************
* Copyright (C)
* 2019 The University of Exeter
* 2018-2019 The University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<*)
theory IsaDofApplications
imports "Isabelle_DOF.scholarly_paper"
begin
use_template "lncs"
use_ontology "Isabelle_DOF.scholarly_paper"
open_monitor*[this::article]
declare[[strict_monitor_checking=false]]
define_shortcut* isadof \<rightleftharpoons> \<open>\isadof\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
dots \<rightleftharpoons> \<open>\ldots\<close>
isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
Protege \<rightleftharpoons> \<open>Prot{\'e}g{\'e}\<close>
(* slanted text in contrast to italics *)
define_macro* slanted_text \<rightleftharpoons> \<open>\textsl{\<close> _ \<open>}\<close>
define_macro* unchecked_label \<rightleftharpoons> \<open>\autoref{\<close> _ \<open>}\<close>
ML\<open>
fun boxed_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_text
(fn ctxt => DOF_lib.string_2_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox")
val neant = K(Latex.text("",\<^here>))
fun boxed_theory_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_theory_text
(fn ctxt => DOF_lib.string_2_theory_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox"
(* #> neant *)) (*debugging *)
fun boxed_sml_text_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "sml")
(* the simplest conversion possible *)
fun boxed_pdf_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "out")
(* the simplest conversion possible *)
fun boxed_latex_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "ltx")
(* the simplest conversion possible *)
fun boxed_bash_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "bash")
(* the simplest conversion possible *)
\<close>
setup\<open>boxed_text_antiquotation \<^binding>\<open>boxed_text\<close> #>
boxed_text_antiquotation \<^binding>\<open>boxed_cartouche\<close> #>
boxed_theory_text_antiquotation \<^binding>\<open>boxed_theory_text\<close> #>
boxed_sml_text_antiquotation \<^binding>\<open>boxed_sml\<close> #>
boxed_pdf_antiquotation \<^binding>\<open>boxed_pdf\<close> #>
boxed_latex_antiquotation \<^binding>\<open>boxed_latex\<close>#>
boxed_bash_antiquotation \<^binding>\<open>boxed_bash\<close>
\<close>
(*>*)
title*[tit::title] \<open>Using the Isabelle Ontology Framework\<close>
subtitle*[stit::subtitle]\<open>Linking the Formal with the Informal\<close>
author*[adb,
email ="''a.brucker@sheffield.ac.uk''",
orcid ="''0000-0002-6355-1200''",
affiliation ="''The University of Sheffield, Sheffield, UK''"]\<open>Achim D. Brucker\<close>
author*[idir,
email = "''idir.aitsadoune@centralesupelec.fr''",
affiliation = "''CentraleSupelec, Paris, France''"]\<open>Idir Ait-Sadoune\<close>
author*[paolo,
email = "''paolo.crisafulli@irt-systemx.fr''",
affiliation = "''IRT-SystemX, Paris, France''"]\<open>Paolo Crisafulli\<close>
author*[bu,
email = "\<open>wolff@lri.fr\<close>",
affiliation = "\<open>Université Paris-Saclay, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
abstract*[abs::abstract, keywordlist="[''Ontology'',''Ontological Modeling'',''Isabelle/DOF'']"]\<open>
While Isabelle is mostly known as part of \<^isabelle> (an interactive
theorem prover), it actually provides a framework for developing a wide
spectrum of applications. A particular strength
of the Isabelle framework is the combination of text editing, formal verification,
and code generation.
Up to now, Isabelle's document preparation system lacks a mechanism
for ensuring the structure of different document types (as, e.g.,
required in certification processes) in general and, in particular,
a mechanism for linking informal and formal parts of a document.
In this paper, we present \<^isadof>, a novel Document Ontology Framework
on top of Isabelle. \<^isadof> allows for conventional typesetting
\<^emph>\<open>as well\<close> as formal development. We show how to model document
ontologies inside \<^isadof>, how to use the resulting meta-information
for enforcing a certain document structure, and discuss ontology-specific
IDE support.
%% If you consider citing this paper, please refer to
%% @{cite "brucker.ea:isabelle-ontologies:2018"}.
\<close>
section*[intro::introduction]\<open> Introduction \<close>
text*[introtext::introduction, level = "Some 1"]\<open>
The linking of the \<^emph>\<open>formal\<close> to the \<^emph>\<open>informal\<close> is perhaps the
most pervasive challenge in the digitization of knowledge and its
propagation. This challenge incites numerous research efforts
summarized under the labels ``semantic web'', ``data mining'', or any
form of advanced ``semantic'' text processing. A key role in structuring this linking is
played by \<^emph>\<open>document ontologies\<close> (also called
\<^emph>\<open>vocabulary\<close> in the semantic web community~@{cite "w3c:ontologies:2015"}),
\<^ie>, a machine-readable form of the structure of documents as well as
the document discourse.
Such ontologies can be used for the scientific discourse within scholarly
articles, mathematical libraries, and in the engineering discourse
of standardized software certification
documents~@{cite "boulanger:cenelec-50128:2015" and "cc:cc-part3:2006"}.
Further applications are the domain-specific discourse in juridical texts or medical reports.
In general, an ontology is a formal explicit description of \<^emph>\<open>concepts\<close>
in a domain of discourse (called \<^emph>\<open>classes\<close>), properties of each concept
describing \<^emph>\<open>attributes\<close> of the concept, as well as \<^emph>\<open>links\<close> between
them. A particular link between concepts is the \<^emph>\<open>is-a\<close> relation declaring
the instances of a subclass to be instances of the super-class.
The main objective of this paper is to present \<^isadof>, a novel
framework to \<^emph>\<open>model\<close> typed ontologies and to \<^emph>\<open>enforce\<close> them during
document evolution. Based on Isabelle infrastructures, ontologies may refer to
types, terms, proven theorems, code, or established assertions.
Based on a novel adaptation of the Isabelle IDE, a document is checked to
\<^emph>\<open>conform\<close> to a particular ontology---\<^isadof> is designed to give fast user-feedback
\<^emph>\<open>during the capture of content\<close>. This is particularly valuable for document
changes, where the \<^emph>\<open>coherence\<close> between the formal and the informal parts of the
content can be mechanically checked.
To avoid any misunderstanding: \<^isadof> is \<^emph>\<open>not a theory in HOL\<close> on ontologies and operations
to track and trace links in texts, it is an \<^emph>\<open>environment to write structured text\<close> which
\<^emph>\<open>may contain\<close> \<^isabelle> definitions and proofs like mathematical articles, tech-reports and
scientific papers---as the present one, which is written in \<^isadof> itself. \<^isadof> is a plugin
into the Isabelle/Isar framework in the style of~@{cite "wenzel.ea:building:2007"}.
\<close>
(* declaring the forward references used in the subsequent sections *)
(*<*)
declare_reference*[bgrnd::text_section]
declare_reference*[isadof::text_section]
declare_reference*[ontomod::text_section]
declare_reference*[ontopide::text_section]
declare_reference*[conclusion::text_section]
(*>*)
text*[plan::introduction, level="Some 1"]\<open> The plan of the paper is as follows: we start by
introducing the underlying Isabelle system (@{text_section (unchecked) \<open>bgrnd\<close>}) followed by
presenting the essentials of \<^isadof> and its ontology language (@{text_section (unchecked) \<open>isadof\<close>}).
This is followed by @{text_section (unchecked) \<open>ontomod\<close>}, where we present three application
scenarios from the point of view of the ontology modeling. In @{text_section (unchecked) \<open>ontopide\<close>}
we discuss the user-interaction generated from the ontological definitions. Finally, we draw
conclusions and discuss related work in @{text_section (unchecked) \<open>conclusion\<close>}. \<close>
section*[bgrnd::text_section,main_author="Some(@{author ''bu''}::author)"]
\<open> Background: The Isabelle System \<close>
text*[background::introduction, level="Some 1"]\<open>
While Isabelle is widely perceived as an interactive theorem prover for HOL
(Higher-order Logic)~@{cite "nipkow.ea:isabelle:2002"}, we would like to emphasize the view that
Isabelle is far more than that: it is the \<^emph>\<open>Eclipse of Formal Methods Tools\<close>. This refers to the
``\<^slanted_text>\<open>generic system framework of Isabelle/Isar underlying recent versions of Isabelle.
Among other things, Isar provides an infrastructure for Isabelle plug-ins, comprising extensible
state components and extensible syntax that can be bound to ML programs. Thus, the Isabelle/Isar
architecture may be understood as an extension and refinement of the traditional `LCF approach',
with explicit infrastructure for building derivative \<^emph>\<open>systems\<close>.\<close>''~@{cite "wenzel.ea:building:2007"}
The current system framework moreover offers the following features:
\<^item> a build management grouping components into pre-compiled sessions,
\<^item> a prover IDE (PIDE) framework~@{cite "wenzel:asynchronous:2014"} with various front-ends,
\<^item> documentation and code generators,
\<^item> an extensible front-end language Isabelle/Isar, and,
\<^item> last but not least, an LCF style, generic theorem prover kernel as
the most prominent and deeply integrated system component.
\<close>
figure*[architecture::figure,relative_width="100",file_src="''figures/isabelle-architecture.pdf''"]\<open>
The system architecture of Isabelle (left-hand side) and the
asynchronous communication between the Isabelle system and
the IDE (right-hand side). \<close>
text*[blug::introduction, level="Some 1"]\<open> The Isabelle system architecture shown in @{figure \<open>architecture\<close>}
comes with many layers, with Standard ML (SML) at the bottom layer as implementation
language. The architecture actually foresees a \<^emph>\<open>Nano-Kernel\<close> (our terminology) which
resides in the SML structure \<^ML_structure>\<open>Context\<close>. This structure provides a kind of container called
\<^emph>\<open>context\<close> providing an identity, an ancestor-list as well as typed, user-defined state
for components (plugins) such as \<^isadof>. On top of the latter, the LCF-Kernel, tactics,
automated proof procedures as well as specific support for higher specification constructs
were built. \<close>
text\<open> We would like to detail the documentation generation of the architecture,
which is based on literate specification commands such as \<^theory_text>\<open>section\<close> \<^dots>,
\<^theory_text>\<open>subsection\<close> \<^dots>, \<^theory_text>\<open>text\<close> \<^dots>, etc.
Thus, a user can add a simple text:
@{boxed_theory_text [display]\<open>
text\<open> This is a description.\<close>\<close>}
These text-commands can be arbitrarily mixed with other commands stating definitions, proofs, code, etc.,
and will result in the corresponding output in generated \<^LaTeX> or HTML documents.
Now, \<^emph>\<open>inside\<close> the textual content, it is possible to embed a \<^emph>\<open>text-antiquotation\<close>:
@{boxed_theory_text [display]\<open>
text\<open> According to the \<^emph>\<open>reflexivity\<close> axiom @{thm refl},
we obtain in \<Gamma> for @{term "fac 5"} the result @{value "fac 5"}.\<close>\<close>}
which is represented in the generated output by:
@{boxed_pdf [display]\<open>According to the reflexivity axiom $x = x$, we obtain in $\Gamma$ for $\operatorname{fac} 5$ the result $120$.\<close>}
where \<^theory_text>\<open>refl\<close> is actually the reference to the axiom of reflexivity in HOL.
For the antiquotation \<^theory_text>\<open>@{value "''fac 5''"}\<close> we assume the usual definition for
\<^theory_text>\<open>fac\<close> in HOL.
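For instance, a minimal definition (one possible variant) reads:
@{boxed_theory_text [display]\<open>
fun fac :: "nat \<Rightarrow> nat"
  where "fac 0 = 1" | "fac (Suc n) = Suc n * fac n"
\<close>}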
\<close>
text*[anti::introduction, level = "Some 1"]\<open> Thus, antiquotations can refer to formal content,
can be type-checked before being displayed and can be used for calculations before actually being
typeset. When editing, Isabelle's PIDE offers auto-completion and error-messages while typing the
above \<^emph>\<open>semi-formal\<close> content.\<close>
section*[isadof::technical,main_author="Some(@{author ''adb''}::author)"]\<open> \<^isadof> \<close>
text\<open> An \<^isadof> document consists of three components:
\<^item> the \<^emph>\<open>ontology definition\<close> which is an Isabelle theory file with definitions
for document-classes and all auxiliary datatypes.
\<^item> the \<^emph>\<open>core\<close> of the document itself which is an Isabelle theory
importing the ontology definition. \<^isadof> provides an own family of text-element
commands such as \<^theory_text>\<open>title*\<close>, \<^theory_text>\<open>section*\<close>, \<^theory_text>\<open>text*\<close>, etc.,
which can be annotated with meta-information defined in the underlying ontology definition.
\<^item> the \<^emph>\<open>layout definition\<close> for the given ontology exploiting this meta-information.
\<close>
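text\<open> For the present paper, these three parts are set up in its own theory header:
@{boxed_theory_text [display]\<open>
theory IsaDofApplications
  imports "Isabelle_DOF.scholarly_paper"
begin
use_template "lncs"
use_ontology "Isabelle_DOF.scholarly_paper"
\<close>}\<close>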
text\<open>\<^isadof> is a novel Isabelle system component providing specific support for all these
three parts. Note that the document core \<^emph>\<open>may\<close>, but \<^emph>\<open>need\<close> not,
use Isabelle definitions or proofs for checking the formal content---the
present paper is actually an example of a document not containing any proof.
The document generation process of \<^isadof> is currently restricted to \<^LaTeX>, which means
that the layout is defined by a set of \<^LaTeX> style files. Several layout
definitions for one ontology are possible and pave the way for generating different \<^emph>\<open>views\<close>
of the same central document, addressing the needs of different purposes
and/or target readers.
While the ontology and the layout definition will have to be developed by an expert
with knowledge of Isabelle and \<^isadof>, as well as of the back-end technology used by the layout
definition, the core is intended to require only minimal knowledge of these two. The situation
is similar to \<^LaTeX>-users, who usually have minimal knowledge about the content in
style-files (\<^verbatim>\<open>.sty\<close>-files). In the document core authors \<^emph>\<open>can\<close> use \<^LaTeX> commands in
their source, but this limits the possibility of using different representation technologies,
\<^eg>, HTML, and increases the risk of arcane error-messages in generated \<^LaTeX>.
The \<^isadof> ontology specification language consists basically of a notation for document classes,
where the attributes are typed with HOL-types and can be instantiated by HOL-terms, \<^ie>,
the actual parsers and type-checkers of the Isabelle system are reused. This has the particular
advantage that \<^isadof> commands can be arbitrarily mixed with Isabelle/HOL commands providing the
machinery for type declarations and term specifications such as enumerations. In particular,
document class definitions provide:
\<^item> a HOL-type for each document class as well as inheritance,
\<^item> support for attributes with HOL-types and optional default values,
\<^item> support for overriding of attribute defaults but not overloading, and
\<^item> text-elements annotated with document classes; they are mutable
instances of document classes.\<close>
text\<open>
Attributes referring to other ontological concepts are called \<^emph>\<open>links\<close>. The HOL-types inside the
document specification language support built-in types for Isabelle/HOL \<^theory_text>\<open>typ\<close>'s, \<^theory_text>\<open>term\<close>'s, and
\<^theory_text>\<open>thm\<close>'s, reflecting Isabelle's internal types for these entities; when denoted in
HOL-terms to instantiate an attribute, for example, there is a specific syntax
(called \<^emph>\<open>inner syntax antiquotations\<close>) that is checked by \<^isadof> for consistency.
Document classes can have a \<^theory_text>\<open>where\<close> clause containing a regular expression over class names.
Classes with such a \<^theory_text>\<open>where\<close> clause are called \<^emph>\<open>monitor classes\<close>. While document classes and their
inheritance relation structure meta-data of text-elements in an object-oriented manner, monitor
classes enforce structural organization of documents via the language specified by the regular
expression enforcing a sequence of text-elements that belong to the corresponding classes. \<^vs>\<open>-0.4cm\<close>\<close>
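text\<open> As an example of such an inner syntax antiquotation, the heading of the present section
links its \<^theory_text>\<open>main_author\<close> attribute to an \<^theory_text>\<open>author\<close> instance:
@{boxed_theory_text [display]\<open>
section*[isadof::technical,
         main_author="Some(@{author ''adb''}::author)"]\<open> \<^isadof> \<close>
\<close>}\<close>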
section*[ontomod::text_section]\<open> Modeling Ontologies in \<^isadof> \<close>
text\<open> In this section, we will use the \<^isadof> document ontology language for three different
application scenarios: for scholarly papers, for mathematical exam sheets as well as standardization
documents where the concepts of the standard are captured in the ontology. For space reasons, we
will concentrate in all three cases on aspects of the modeling.\<close>
subsection*[scholar_onto::example]\<open> The Scholarly Paper Scenario: Eating One's Own Dog Food. \<close>
text\<open> The following is a simple ontology modeling scientific papers. In this
\<^isadof> application scenario, we deliberately refrain from integrating references to
(Isabelle) formal content in order to demonstrate that \<^isadof> is not a framework by
Isabelle users for Isabelle users only. Of course, such references can be added easily and
represent a particular strength of \<^isadof>.\<close>
text*["paper_onto_core"::float,
main_caption="\<open>The core of the ontology definition for writing scholarly papers.\<close>"]
\<open>@{boxed_theory_text [display]\<open>
doc_class title =
short_title :: "string option" <= None
doc_class subtitle =
abbrev :: "string option" <= None
doc_class author =
affiliation :: "string"
doc_class abstract =
keyword_list :: "string list" <= "[]"
doc_class text_section =
main_author :: "author option" <= None
todo_list :: "string list" <= "[]"
\<close>}\<close>
text\<open> The first part of the ontology \<^theory_text>\<open>scholarly_paper\<close>
(see @{float "paper_onto_core"})
contains the document class definitions
with the usual text-elements of a scientific paper. The attributes \<^theory_text>\<open>short_title\<close>,
\<^theory_text>\<open>abbrev\<close>, etc., are introduced with their types as well as their default values.
Our model prescribes an optional \<^theory_text>\<open>main_author\<close> and a todo-list attached to an arbitrary
text section; since instances of this class are mutable (meta)-objects of text-elements, they
can be modified arbitrarily through subsequent text and of course globally during text evolution.
Since \<^theory_text>\<open>author\<close> is a HOL-type internally generated by the \<^isadof> framework, it can
appear in the \<^theory_text>\<open>main_author\<close> attribute of the \<^theory_text>\<open>text_section\<close> class;
semantic links between concepts can be modeled this way.
The translation of its content to, \<^eg>, Springer's \<^LaTeX> setup for the Lecture Notes in Computer
Science Series, as required by many scientific conferences, is mostly straightforward.
\<^vs>\<open>-0.8cm\<close>\<close>
figure*[fig1::figure,relative_width="95",file_src="''figures/Dogfood-Intro.png''"]
\<open> Ouroboros I: This paper from inside \<^dots> \<close>
(*<*)declare_reference*[paper_onto_sections::float](*>*)
text\<open>\<^vs>\<open>-0.8cm\<close> @{figure \<open>fig1\<close>} shows the corresponding view in the Isabelle/PIDE of the present paper.
Note that the text uses \<^isadof>'s own text-commands containing the meta-information provided by
the underlying ontology.
We proceed with the definition of \<^theory_text>\<open>introduction\<close>s, which we define as an extension of
\<^theory_text>\<open>text_section\<close>, the class intended to capture common infrastructure:
@{boxed_theory_text [display]\<open>
doc_class introduction = text_section +
comment :: string
\<close>}
As a consequence of the definition as extension, the \<^theory_text>\<open>introduction\<close> class
inherits the attributes \<^theory_text>\<open>main_author\<close> and \<^theory_text>\<open>todo_list\<close> together with
the corresponding default values.
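In the same way, an instance of the new class can override the inherited default, \<^eg>
(with a hypothetical label):
@{boxed_theory_text [display]\<open>
text*[intro1::introduction,
      main_author="Some(@{author ''bu''}::author)"]\<open> ... \<close>
\<close>}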
As a variant of the introduction, we could add here an attribute that contains the formal
claims of the article --- either here, or, for example, in the keyword list of the abstract.
As type, one could use either the built-in type \<^theory_text>\<open>term\<close> (for syntactically correct,
but not necessarily proven entities) or \<^theory_text>\<open>thm\<close> (for formally proven entities). It suffices
to add the line:
@{boxed_theory_text [display]\<open>
claims :: "thm list"
\<close>}
and to extend the \<^LaTeX>-style accordingly to handle the additional field.
Note that \<^theory_text>\<open>term\<close> and \<^theory_text>\<open>thm\<close> are types reflecting the core-types of the
Isabelle kernel. In a corresponding conclusion section, one could model analogously an
achievement section; by programming a specific compliance check in SML, the implementation
of automated forms of validation checks for specific categories of papers is conceivable.
Since this requires deeper knowledge in Isabelle programming, however, we consider this out
of the scope of this paper.
We proceed more or less conventionally with the subsequent sections (@{float (unchecked)\<open>paper_onto_sections\<close>}).\<close>
text*["paper_onto_sections"::float,
main_caption = "''Various types of sections of a scholarly papers.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class example = text_section +
comment :: string
doc_class conclusion = text_section +
main_author :: "author option" <= None
doc_class related_work = conclusion +
main_author :: "author option" <= None
doc_class bibliography =
style :: "string option" <= "''LNCS''"
\<close>}\<close>
(*<*)declare_reference*[paper_onto_monitor::float](*>*)
text\<open>... and finish with a monitor class definition that enforces a textual ordering
in the document core by a regular expression (@{float (unchecked) "paper_onto_monitor"}).\<close>
text*["paper_onto_monitor"::float,
main_caption = "''A monitor for the scholarly paper ontology.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class article =
trace :: "(title + subtitle + author+ abstract +
introduction + technical + example +
conclusion + bibliography) list"
where "(title ~~ \<lbrakk>subtitle\<rbrakk> ~~ \<lbrace>author\<rbrace>$^+$+ ~~ abstract ~~
introduction ~~ \<lbrace>technical || example\<rbrace>$^+$ ~~ conclusion ~~
bibliography)"
\<close>}
\<close>
text\<open> We might wish to add a component into our ontology that models figures to be included into
the document. This boils down to the exercise of modeling structured data in the style of a
functional programming language in HOL and of reusing the implicit HOL-type inside a suitable document
class \<^theory_text>\<open>figure\<close>:
@{boxed_theory_text [display]\<open>
datatype placement = h | t | b | ht | hb
doc_class figure = text_section +
relative_width :: "int" (* percent of textwidth *)
src :: "string"
placement :: placement
spawn_columns :: bool <= True
\<close>}
\<close>
text\<open> Alternatively, by including the HOL-libraries for rationals, it is possible to
use fractions or even mathematical reals. This must be counterbalanced by syntactic
and semantic convenience. Choosing the mathematical reals, \<^eg>, would have the drawback that
attribute evaluation could be substantially more complicated.\<close>
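text\<open> A sketch of such a variant, assuming the rational type \<^theory_text>\<open>rat\<close> from the
HOL library and a hypothetical class name:
@{boxed_theory_text [display]\<open>
doc_class figure_rat = text_section +
  relative_width :: "rat" (* fraction of the textwidth *)
\<close>}\<close>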
figure*[fig_figures::figure,relative_width="85",file_src="''figures/Dogfood-figures.png''"]
\<open> Ouroboros II: figures \<^dots> \<close>
text\<open> The document class \<^theory_text>\<open>figure\<close> --- supported by the \<^isadof> text command
\<^theory_text>\<open>figure*\<close> --- makes it possible to express the pictures and diagrams in this paper
such as @{figure \<open>fig_figures\<close>}.
\<close>
subsection*[math_exam::example]\<open> The Math-Exam Scenario \<close>
text\<open> The Math-Exam Scenario is an application with mixed formal and
semi-formal content. It addresses applications where the author of the exam is not present
during the exam and the preparation requires a very rigorous process, as for the French
\<^emph>\<open>baccalaureat\<close> and exams at The University of Sheffield.
We assume that the content has four different types of addressees, which have a different
\<^emph>\<open>view\<close> on the integrated document:
\<^item> the \<^emph>\<open>setter\<close>, \<^ie>, the author of the exam,
\<^item> the \<^emph>\<open>checker\<close>, \<^ie>, an internal person that checks
the exam for feasibility and non-ambiguity,
\<^item> the \<^emph>\<open>external examiner\<close>, \<^ie>, an external person that checks
the exam for feasibility and non-ambiguity, and
\<^item> the \<^emph>\<open>student\<close>, \<^ie>, the addressee of the exam.
\<close>
text\<open> The latter quality assurance mechanism is used in many universities,
where for organizational reasons the execution of an exam takes place in facilities
where the author of the exam is not expected to be physically present.
Furthermore, we assume a simple grade system (thus, some calculation is required). \<close>
text*["onto_exam"::float,
main_caption = "''The core of the ontology modeling math exams.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class Author = ...
datatype Subject = algebra | geometry | statistical
datatype Grade = A1 | A2 | A3
doc_class Header = examTitle :: string
examSubject :: Subject
date :: string
timeAllowed :: int -- \<open>minutes\<close>
datatype ContentClass = setter
| checker
| external_examiner
| student
doc_class Exam_item =
concerns :: "ContentClass set"
type_synonym SubQuestion = string
\<close>}\<close>
(*<*)declare_reference*[onto_questions::float](*>*)
text\<open>The heart of this ontology (see @{float "onto_exam"}) is an alternation of questions and answers,
where the answers can consist of simple yes-no answers (QCM style check-boxes) or lists of formulas.
Since we do not
assume familiarity of the students with Isabelle (\<^theory_text>\<open>term\<close> would assume that this is a
parse-able and type-checkable entity), we basically model a derivation as a sequence of strings
(see @{float (unchecked)"onto_questions"}).\<close>
text*["onto_questions"::float,
main_caption = "''An exam can contain different types of questions.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class Answer_Formal_Step = Exam_item +
justification :: string
"term" :: "string"
doc_class Answer_YesNo = Exam_item +
step_label :: string
yes_no :: bool -- \<open>for checkboxes\<close>
datatype Question_Type =
formal | informal | mixed
doc_class Task = Exam_item +
level :: Level
type :: Question_Type
subitems :: "(SubQuestion *
(Answer_Formal_Step list + Answer_YesNo) list) list"
concerns :: "ContentClass set" <= "UNIV"
mark :: int
doc_class Exercise = Exam_item +
type :: Question_Type
content :: "(Task) list"
concerns :: "ContentClass set" <= "UNIV"
mark :: int
\<close>}\<close>
(*<*)declare_reference*[onto_exam_monitor::float](*>*)
text\<open>
In many institutions, it makes sense to have a rigorous process of validation
for exam subjects: is the initial question correct? Is a proof in the sense of the
question possible? We model the possibility that the @{term examiner} validates a
question by a sample proof validated by Isabelle (see @{float (unchecked) "onto_exam_monitor"}).
In our scenario these sample proofs are completely \<^emph>\<open>internal\<close>, \<^ie>, not exposed to the
students but just additional material for the internal review process of the exam.\<close>
text*["onto_exam_monitor"::float,
main_caption = "''Validating exams.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class Validation =
tests :: "term list" <="[]"
proofs :: "thm list" <="[]"
doc_class Solution = Exam_item +
content :: "Exercise list"
valids :: "Validation list"
concerns :: "ContentClass set" <= "{setter,checker,external_examiner}"
doc_class MathExam=
content :: "(Header + Author + Exercise) list"
global_grade :: Grade
where "\<lbrace>Author\<rbrace>$^+$ ~~ Header ~~ \<lbrace>Exercise ~~ Solution\<rbrace>$^+$ "
\<close>}\<close>
(*<*)declare_reference*["fig_qcm"::figure](*>*)
text\<open> Using the \<^LaTeX> package hyperref, it is possible to conceive interactive
exam-sheets with multiple-choice and/or free-response elements
(see @{figure (unchecked) \<open>fig_qcm\<close>}). With the
help of the latter, it is possible that students write a formal mathematical
derivation in a browser---as part of an algebra exercise, for example---which is submitted to the examiners
electronically. \<close>
figure*[fig_qcm::figure,
relative_width="90",file_src="''figures/InteractiveMathSheet.png''"]
\<open>A Generated QCM Fragment \<^dots> \<close>
subsection*[cenelec_onto::example]\<open> The Certification Scenario following CENELEC \<close>
text\<open> Documents to be provided in formal certifications (such as CENELEC
50126/50128, the DO-178B/C, or Common Criteria) can profit much from the control of ontological
consistency: a lot of an evaluator's work consists in tracing down the links from requirements over
assumptions down to elements of evidence, be it in the models, the code, or the tests.
In a certification process, traceability becomes a major concern; and providing
mechanisms to ensure complete traceability already at the development of the
global document will clearly increase speed and reduce risk and cost of a
certification process. Making the link-structure machine-checkable, be it between requirements,
assumptions, their implementation and their discharge by evidence (be it tests, proofs, or
authoritative arguments), is therefore natural and has the potential to decrease the cost
of developments targeting certifications. Continuously checking the links between the formal
and the semi-formal parts of such documents is particularly valuable during the (usually
collaborative) development effort.
As in many other cases, formal certification documents come with their own terminology and pragmatics
of what has to be demonstrated and where, and how the traceability of requirements through
design-models over code to system environment assumptions has to be assured.
\<close>
(*<*)declare_reference*["conceptual"::float](*>*)
text\<open> In the sequel, we present a simplified version of an ontological model used in a
case-study~@{cite "bezzecchi.ea:making:2018"}. We start with an introduction of the concept of requirement
(see @{float (unchecked) "conceptual"}). \<close>
text*["conceptual"::float,
main_caption = "''Modeling requirements.''"]\<open>
@{boxed_theory_text [display]\<open>
doc_class requirement = long_name :: "string option"
doc_class requirement_analysis = no :: "nat"
where "requirement_item +"
doc_class hypothesis = requirement +
hyp_type :: hyp_type <= physical (* default *)
datatype ass_kind = informal | semiformal | formal
doc_class assumption = requirement +
assumption_kind :: ass_kind <= informal
\<close>}\<close>
text\<open>Such ontologies can be enriched by larger explanations and examples, which may help
the team of engineers substantially when developing the central document for a certification,
like an explication of what precisely the difference is between a \<^emph>\<open>hypothesis\<close> and an
\<^emph>\<open>assumption\<close> in the context of the evaluation standard. Since the PIDE makes the definition
of each document class available by a simple mouse-click, this kind of meta-knowledge
can be made far more accessible during the document evolution.
For example, the term of category \<^emph>\<open>assumption\<close> is used for domain-specific assumptions.
It has formal, semi-formal and informal sub-categories. They have to be
tracked and discharged by appropriate validation procedures within a
certification process, be it by test or proof. It is different from a hypothesis, which is
globally assumed and accepted.
In the sequel, the category \<^emph>\<open>exported constraint\<close> (or \<^emph>\<open>ec\<close> for short)
is used for formal assumptions that arise during the analysis,
design, or implementation and have to be tracked till the final
evaluation target, and discharged by appropriate validation procedures
within the certification process, be it by test or proof. A particular class of interest
is the category \<^emph>\<open>safety related application condition\<close> (or \<^emph>\<open>srac\<close>
for short) which is used for \<^emph>\<open>ec\<close>'s that establish safety properties
of the evaluation target. Their traceability throughout the certification
is therefore particularly critical. This is naturally modeled as follows:
@{boxed_theory_text [display]\<open>
doc_class ec = assumption +
assumption_kind :: ass_kind <= (*default *) formal
doc_class srac = ec +
assumption_kind :: ass_kind <= (*default *) formal
\<close>}
\<close>
section*[ontopide::technical]\<open> Ontology-based IDE support \<close>
text\<open> We present a selection of interaction scenarios from @{example \<open>scholar_onto\<close>}
and @{example \<open>cenelec_onto\<close>} with Isabelle/PIDE instrumented by \<^isadof>. \<close>
(*<*)
declare_reference*["text_elements"::float]
declare_reference*["hyperlinks"::float]
(*>*)
subsection*[scholar_pide::example]\<open> A Scholarly Paper \<close>
text\<open> In @{float (unchecked) "text_elements"}~(a)
and @{float (unchecked) "text_elements"}~(b) we show how
hovering over links permits exploring their meta-information.
Clicking on a document class identifier hyperlinks into the corresponding
class definition (@{float (unchecked) "hyperlinks"}~(a)); hovering over an attribute-definition
works analogously (the attribute is qualified in order to disambiguate; @{float (unchecked) "hyperlinks"}~(b)).
\<close>
text*["text_elements"::float,
main_caption="\<open>Exploring text elements.\<close>"]
\<open>
@{fig_content (width=53, height=5, caption="Exploring a reference of a text element.") "figures/Dogfood-II-bgnd1.png"
}\<^hfill>@{fig_content (width=47, height=5, caption="Exploring the class of a text element.") "figures/Dogfood-III-bgnd-text_section.png"}
\<close>
text*["hyperlinks"::float,
main_caption="\<open>Hyperlinks.\<close>"]
\<open>
@{fig_content (width=48, caption="Hyperlink to Class-Definition.") "figures/Dogfood-IV-jumpInDocCLass.png"
}\<^hfill>@{fig_content (width=47, caption="Exploring an attribute.") "figures/Dogfood-V-attribute.png"}
\<close>
subsection*[cenelec_pide::example]\<open> CENELEC \<close>
(*<*)declare_reference*[figfig3::figure](*>*)
text\<open> The corresponding view in @{figure (unchecked) \<open>figfig3\<close>} shows the core part of a document
conforming to @{example \<open>cenelec_onto\<close>}. The first sample shows standard Isabelle antiquotations
@{cite "wenzel:isabelle-isar:2017"} into formal entities of a theory. This way, the informal parts
of a document get ``formal content'' and become more robust under change.\<close>
figure*[figfig3::figure,relative_width="80",file_src="''figures/antiquotations-PIDE.png''"]
\<open> Standard antiquotations referring to theory elements.\<close>
(*<*)declare_reference*[figfig5::figure] (*>*)
text\<open> The subsequent sample in @{figure (unchecked) \<open>figfig5\<close>} shows the definition of a
\<^emph>\<open>safety-related application condition\<close>, a side-condition of a theorem which
has the consequence that a certain calculation must be executed sufficiently fast on an embedded
device. This condition cannot be established inside the formal theory but has to be
checked by system integration tests.\<close>
figure*[figfig5::figure, relative_width="80", file_src="''figures/srac-definition.png''"]
\<open> Defining a SRAC reference \<^dots> \<close>
figure*[figfig7::figure, relative_width="80", file_src="''figures/srac-as-es-application.png''"]
\<open> Using a SRAC as EC document reference. \<close>
text\<open> Now we reference in @{figure \<open>figfig7\<close>} this safety-related condition;
however, this happens in a context where general \<^emph>\<open>exported constraints\<close> are listed.
\<^isadof>'s checks establish that this is legal in the given ontology.
This example shows that ontological modeling is indeed adequate for large technical,
collaboratively developed documentations, where modifications can lead easily to incoherence.
The current checks help to systematically avoid this type of incoherence between formal and
informal parts. \<close>
section*[onto_future::technical]\<open> Monitor Classes \<close>
text\<open> Besides sub-typing, there is another relation between
document classes: a class can be a \<^emph>\<open>monitor\<close> to other ones,
which is expressed by the occurrence of a @{theory_text \<open>where\<close>} clause
in the document class definition containing a regular
expression (see @{example \<open>scholar_onto\<close>}).
While class-extension refers to data-inheritance of attributes,
a monitor imposes structural constraints---the order in which
instances of monitored classes may occur. \<close>
text\<open>
The control of monitors is done by the commands:
\<^item> \<^theory_text>\<open>open_monitor*\<close> \<^emph>\<open><doc-class>\<close>
\<^item> \<^theory_text>\<open>close_monitor*\<close> \<^emph>\<open><doc-class>\<close>
\<close>
text\<open>
where the automaton of the monitor class is expected to be in a final state. In the final state,
user-defined SML monitors can be nested, so it is possible to ``overlay'' one or more monitoring
classes and impose different sets of structural constraints in a modular way. Classes which are neither
directly nor indirectly (via inheritance) mentioned in the monitor are \<^emph>\<open>independent\<close> of a
monitor; instances of independent text elements may occur freely. \<close>
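text\<open> For example, the present paper itself is enclosed by such a pair of commands
for the \<^theory_text>\<open>article\<close> monitor class:
@{boxed_theory_text [display]\<open>
open_monitor*[this::article]
(* ... the text elements of the article ... *)
close_monitor*[this]
\<close>}\<close>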
section*[conclusion::conclusion]\<open> Conclusion and Related Work\<close>
text\<open> We have demonstrated the use of \<^isadof>, a novel ontology modeling and enforcement
IDE deeply integrated into the Isabelle/Isar Framework. The two most distinguishing features are
\<^item> \<^isadof> and its ontology language form a strongly typed framework that allows
for referring (albeit not reasoning) to entities of \<^isabelle>, most notably types, terms,
and (formally proven) theorems, and
\<^item> \<^isadof> is supported by the Isabelle/PIDE framework; thus, the advantages of an IDE for
text-exploration (What is the type of this link? To which text element does this link refer?
Which are the syntactic alternatives here?) are available during editing
instead of a post-hoc validation process.
\<close>
text\<open> Of course, a conventional batch-process also exists, which can be used
for the validation of large document bases in a continuous build process.
This combination of formal and semi-informal elements, as well as a systematic enforcement
of the coherence to a document ontology of the latter, is, as we believe, novel and offers
a unique potential for the semantic treatment of scientific texts and technical documentations. \<close>
text\<open>
To our knowledge, this is the first ontology-driven framework for
editing mathematical and technical documents that focuses particularly
on documents mixing formal and informal content---a type of documents
that is very common in technical certification processes. We see
mainly one area of related work: IDEs and text editors that support
editing and checking of documents based on an ontology. There is a
large group of ontology editors (\<^eg>, \<^Protege>~@{cite "protege"},
Fluent Editor~@{cite "cognitum"}, NeOn~@{cite "neon"}, or
OWLGrEd~@{cite "owlgred"}). With them, we share the support for defining
ontologies as well as auto-completion when editing documents based on
an ontology. While our ontology definitions are currently based on a
textual definition, widely used ontology editors (\<^eg>,
OWLGrEd~@{cite "owlgred"}) also support graphical notations. This could
be added to \<^isadof> in the future. A unique feature of \<^isadof> is the
deep integration of formal and informal text parts. The only other
work in this area we are aware of is rOntorium~@{cite "rontorium"}, a plugin
for \<^Protege> that integrates R~@{cite "adler:r:2010"} into an
ontology environment. Here, the main motivation behind this
integration is to allow for statistically analyzing ontological
documents. Thus, this is complementary to our work.\<close>
text\<open> \<^isadof> in its present form has a number of technical shortcomings as well
as potentials not yet explored. On the long list of the shortcomings is the
fact that strings inside HOL-terms do not support, for example, Unicode.
For the moment, \<^isadof> is conceived as an
add-on for \<^isabelle>; a much deeper integration of \<^isadof> into Isabelle
could increase both performance and uniformity. Finally, different target
presentations (such as HTML) would be highly desirable, in particular for the
math exam scenarios. And last but not least, it would be desirable that PIDE
itself is ``ontology-aware'' and can, for example, use meta-information
to control read- and write accesses of \<^emph>\<open>parts\<close> of documents.
\<close>
paragraph\<open> Availability. \<close>
text\<open> The implementation of the framework, the discussed ontology definitions,
and examples are available at
\url{\dofurl}.\<close>
paragraph\<open> Acknowledgement. \<close>
text\<open> This work was partly supported by the framework of IRT SystemX, Paris-Saclay, France,
and therefore granted with public funds within the scope of the Program ``Investissements d'Avenir''.\<close>
(*<*)
section*[bib::bibliography]\<open>References\<close>
close_monitor*[this]
end
(*>*)

ROOT

@@ -1,23 +0,0 @@
chapter AFP
session "Isabelle_DOF-Example-I" (AFP) = "Isabelle_DOF" +
options [document = pdf, document_output = "output", document_build = dof, timeout = 300]
theories
IsaDofApplications
document_files
"root.bib"
"authorarchive.sty"
"preamble.tex"
"lstisadof-manual.sty"
"figures/isabelle-architecture.pdf"
"figures/Dogfood-Intro.png"
"figures/InteractiveMathSheet.png"
"figures/Dogfood-II-bgnd1.png"
"figures/Dogfood-III-bgnd-text_section.png"
"figures/Dogfood-IV-jumpInDocCLass.png"
"figures/Dogfood-III-bgnd-text_section.png"
"figures/Dogfood-V-attribute.png"
"figures/antiquotations-PIDE.png"
"figures/srac-definition.png"
"figures/srac-as-es-application.png"
"figures/Dogfood-figures.png"

authorarchive.sty

@@ -1,345 +0,0 @@
%% Copyright (C) 2008-2023 Achim D. Brucker, https://www.brucker.ch
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1.3c of the License, or (at your option) any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
\NeedsTeXFormat{LaTeX2e}\relax
\ProvidesPackage{authorarchive}
[2023/02/10 v1.3.0
Self-archiving information for scientific publications.]
%
\PassOptionsToPackage{hyphens}{url}
%
\RequirePackage{ifthen}
\RequirePackage[inline]{enumitem}
\RequirePackage{orcidlink}
\RequirePackage{eso-pic}
\RequirePackage{intopdf}
\RequirePackage{kvoptions}
\RequirePackage{hyperref}
\RequirePackage{calc}
\RequirePackage{qrcode}
\RequirePackage{etoolbox}
\newrobustcmd\BibTeX{Bib\TeX}
%
%Better url breaking
\g@addto@macro{\UrlBreaks}{\UrlOrds}
%
% Option declarations
% -------------------
\SetupKeyvalOptions{
family=AA,
prefix=AA@
}
%
\DeclareStringOption[.]{bibtexdir}
\DeclareStringOption[https://duckduckgo.com/?q=]{baseurl}
\DeclareStringOption[.pdf]{suffix}
\DeclareStringOption[UNKNOWN PUBLISHER]{publisher}[]
\DeclareStringOption[UNKNOWN YEAR]{year}[]
\DeclareStringOption[]{key}[]
\DeclareStringOption[]{doi}[]
\DeclareStringOption[]{doiText}[]
\DeclareStringOption[]{publisherurl}[]
\DeclareStringOption[UNKNOWN START PAGE]{startpage}[]
\DeclareStringOption[UNKNOWN PUBLICATION]{publication}[]
\DeclareBoolOption{ACM}
\DeclareBoolOption{acmart}
\DeclareBoolOption{ENTCS}
\DeclareBoolOption{IEEE}
\DeclareBoolOption{LNCS}
\DeclareBoolOption{LNI}
\DeclareBoolOption{nocopyright}
\DeclareBoolOption{nourl}
\DeclareBoolOption{nobib}
\DeclareBoolOption{orcidicon}
%\ProcessOptions\relax
% Default option rule
\DeclareDefaultOption{%
\ifx\CurrentOptionValue\relax
\PackageWarningNoLine{\@currname}{%
Unknown option `\CurrentOption'\MessageBreak
is passed to package `authorarchive'%
}%
% Pass the option to package color.
% Again it is better to expand \CurrentOption.
\expandafter\PassOptionsToPackage\expandafter{\CurrentOption}{color}%
\else
% Package color does not take options with values.
% We provide the standard LaTeX error.
\@unknownoptionerror
\fi
}
\ProcessKeyvalOptions*
\newcommand{\AA@defIncludeFiles}{
\def\AA@bibBibTeX{\AA@bibtexdir/\AA@key.bib}
\def\AA@bibBibTeXLong{\AA@bibtexdir/\AA@key.bibtex}
\def\AA@bibWord{\AA@bibtexdir/\AA@key.word.xml}
\def\AA@bibEndnote{\AA@bibtexdir/\AA@key.enw}
\def\AA@bibRIS{\AA@bibtexdir/\AA@key.ris}
}
\AA@defIncludeFiles
\newboolean{AA@bibExists}
\setboolean{AA@bibExists}{false}
\newcommand{\AA@defIncludeSwitches}{
\IfFileExists{\AA@bibBibTeX}{\setboolean{AA@bibExists}{true}}{}
\IfFileExists{\AA@bibBibTeXLong}{\setboolean{AA@bibExists}{true}}{}
\IfFileExists{\AA@bibWord}{\setboolean{AA@bibExists}{true}}{}
\IfFileExists{\AA@bibEndnote}{\setboolean{AA@bibExists}{true}}{}
\IfFileExists{\AA@bibRIS}{\setboolean{AA@bibExists}{true}}{}
}
\AA@defIncludeSwitches
% Provide command for dynamic configuration setup
% \def\authorsetup{\kvsetkeys{AA}}
\newcommand{\authorsetup}[1]{%
\kvsetkeys{AA}{#1}
\AA@defIncludeFiles
\AA@defIncludeSwitches
}
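% Example usage in a document preamble (illustrative values only):
%   \usepackage[LNCS]{authorarchive}
%   \authorsetup{key=brucker.ea-isabelle-ontologies-2018,
%                year=2018, doi=10.1007/978-3-319-96812-4_3}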
% Load local configuration
\InputIfFileExists{authorarchive.config}{}{}
% define proxy command for setting PDF attributes
\ExplSyntaxOn
\@ifundefined{pdfmanagement_add:nnn}{%
\newcommand{\AA@pdfpagesattribute}[2]{\pdfpagesattr{/#1 #2}}%
}{%
\newcommand{\AA@pdfpagesattribute}[2]{\pdfmanagement_add:nnn{Pages}{#1}{#2}}%
}%
\ExplSyntaxOff
\newlength\AA@x
\newlength\AA@y
\newlength\AA@width
\setlength\AA@x{1in+\hoffset+\oddsidemargin}
\newcommand{\authorcrfont}{\footnotesize}
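% \authorcrfont sets the font of the self-archiving note; \authorat places
% the note near the bottom of the page, and \authorwidth sets its text width.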
\newcommand{\authorat}[1]{\AtPageUpperLeft{\put(\LenToUnit{\AA@x},\LenToUnit{.2cm-\paperheight}){#1}}}
\newcommand{\authorwidth}[1]{\setlength{\AA@width}{#1}}
\setlength{\AA@width}{\textwidth}
\def\AA@pageinfo{}
\ifthenelse{\equal{\AA@startpage}{UNKNOWN START PAGE}}{%
}{%
\setcounter{page}{\AA@startpage}%
\def\AA@pageinfo{pp. \thepage--\pageref{\aa@lastpage}, }
}
%%%% sig-alternate.cls
\ifAA@ACM%
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=ACM}
}{}
\global\boilerplate={}
\global\copyrightetc={}
\renewcommand{\conferenceinfo}[2]{}
\renewcommand{\authorcrfont}{\scriptsize}
\setlength\AA@x{1in+\hoffset+\oddsidemargin}
\setlength\AA@y{-\textheight+\topmargin+\headheight-\footskip} % -\voffset-\topmargin-\headheight-\footskip}
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},\LenToUnit{\AA@y}){#1}}
\setlength{\AA@width}{\columnwidth}
\fi
%
%%%% acmart.cls
\ifAA@acmart%
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=ACM}
}{}
\renewcommand{\authorat}[1]{\AtPageUpperLeft{\put(\LenToUnit{\AA@x},\LenToUnit{0.2cm-\paperheight}){#1}}}
\setlength{\AA@width}{\textwidth}
\fi
%
%%%% LNCS
\ifAA@LNCS%
\ifAA@orcidicon%
\renewcommand{\orcidID}[1]{\orcidlink{#1}}
\else\relax\fi%
%
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=Springer-Verlag}
}{}
\renewcommand{\authorcrfont}{\scriptsize}
\@ifclasswith{llncs}{a4paper}{%
\AA@pdfpagesattribute{CropBox}{[92 114 523 780]}%
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},40){#1}}%
}{%
\AA@pdfpagesattribute{CropBox}{[92 65 523 731]}%
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},23){#1}}%
}
\setlength{\AA@width}{\textwidth}
\setcounter{tocdepth}{2}
\fi
%
%%%% LNI
\ifAA@LNI%
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=GI}
}{}
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},35){#1}}
\renewcommand{\authorcrfont}{\scriptsize}
\AA@pdfpagesattribute{CropBox}{[70 65 526.378 748.15]}
\setlength{\AA@width}{\textwidth}
\setcounter{tocdepth}{2}
\fi
%
%%%% ENTCS
\ifAA@ENTCS%
\addtolength{\voffset}{1cm}
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=Elsevier Science B.~V.}
}{}
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},\LenToUnit{-.5cm-\the\ht\AA@authoratBox}){#1}}
\renewcommand{\authorcrfont}{\scriptsize}
\setlength{\AA@width}{\textwidth}
\fi
%
%%%% IEEE
\ifAA@IEEE%
\ifthenelse{\equal{\AA@publisher}{UNKNOWN PUBLISHER}}{%
\setkeys{AA}{publisher=IEEE}
}{}
\renewcommand{\authorat}[1]{\put(\LenToUnit{\AA@x},6){#1}}
\renewcommand{\authorcrfont}{\scriptsize}
\setlength{\AA@width}{\textwidth}
\setcounter{tocdepth}{2}
\fi
%
\hypersetup{%
draft = false,
bookmarksopen = true,
bookmarksnumbered= true,
}
\@ifpackageloaded{totpages}{%
\def\aa@lastpage{TotPages}
}{%
\RequirePackage{lastpage}
\def\aa@lastpage{LastPage}
}
\newsavebox{\AA@authoratBox}
\AddToShipoutPicture*{%
\setlength{\unitlength}{1mm}%
\savebox{\AA@authoratBox}{%
\parbox{1.4cm}{%
\bgroup%
\normallineskiplimit=0pt%
\ifAA@nourl%
\ifx\AA@doi\@empty\relax%
\else%
\qrcode[hyperlink,height=1.17cm,padding]{https://doi.org/\AA@doi}%
\fi%
\else%
\qrcode[hyperlink,height=1.17cm,padding]{\AA@baseurl/\AA@key\AA@suffix}%
\fi%
\egroup%
}%
\ifAA@nourl\ifx\AA@doi\@empty\addtolength{\AA@width}{1.4cm}\fi\fi
\parbox{\AA@width-1.4cm}{\authorcrfont%
\ifAA@LNCS%
\AA@publication, \AA@pageinfo \AA@year. %
\ifAA@nocopyright\else
\textcopyright~\AA@year~\AA@publisher.
\fi
This is the author's
version of the work. It is posted
\ifAA@nourl\relax\else%
at \url{\AA@baseurl/\AA@key\AA@suffix} %
\fi
\ifAA@nocopyright\relax\else
by permission of \AA@publisher{}
\fi
for your personal use.
\ifx\AA@doi\@empty%
\relax
\else
The final publication is available at Springer via
\ifx\AA@doiText\@empty%
\url{https://doi.org/\AA@doi}.
\else
\href{https://doi.org/\AA@doi}{\AA@doiText}.
\fi
\fi
\else
\ifAA@nocopyright\relax\else
\textcopyright~\AA@year~\AA@publisher. %
\fi%
This is the author's
version of the work. It is posted
\ifAA@nourl\relax\else%
at \url{\AA@baseurl/\AA@key\AA@suffix} %
\fi
\ifAA@nocopyright\relax\else
by permission of \AA@publisher{} %
\fi
for your personal use. Not for redistribution. The definitive
version was published in \emph{\AA@publication}, \AA@pageinfo \AA@year%
\ifx\AA@doi\@empty%
\ifx\AA@publisherurl\@empty%
.%
\else
\url{\AA@publisherurl}.%
\fi
\else
\ifx\AA@doiText\@empty%
, doi: \href{https://doi.org/\AA@doi}{\AA@doi}.%
\else
, doi: \href{https://doi.org/\AA@doi}{\AA@doiText}.%
\fi
\fi
\fi
\ifAA@nobib\relax\else%
\ifthenelse{\boolean{AA@bibExists}}{%
\hfill
\begin{itemize*}[label={}, itemjoin={,}]
\IfFileExists{\AA@bibBibTeX}{%
\item \expanded{\attachandlink[\AA@key.bib]{\AA@bibBibTeX}[application/x-bibtex]{BibTeX entry of this paper}{\BibTeX}}%
}{%
\IfFileExists{\AA@bibBibTeXLong}{%
\item \expanded{\attachandlink[\AA@key.bib]{\AA@bibBibTeXLong}[application/x-bibtex]{BibTeX entry of this paper}{\BibTeX}}%
}{%
\typeout{No file \AA@bibBibTeX{} (and no \AA@bibBibTeXLong) found. Not embedding reference in BibTeX format.}%
}%
}%
\IfFileExists{\AA@bibWord}{%
\item \expanded{\attachandlink[\AA@key.word.xml]{\AA@bibWord}[application/xml]{XML entry of this paper (e.g., for Word 2007 and later)}{Word}}%
}{%
\typeout{No file \AA@bibWord{} found. Not embedding reference for Word 2007 and later.}%
}%
\IfFileExists{\AA@bibEndnote}{%
\item \expanded{\attachandlink[\AA@key.enw]{\AA@bibEndnote}[application/x-endnote-refer]{Endnote entry of this paper}{EndNote}}%
}{%
\typeout{No file \AA@bibEndnote{} found. Not embedding reference in Endnote format.}%
}%
\IfFileExists{\AA@bibRIS}{%
\item \expanded{\attachandlink[\AA@key.ris]{\AA@bibRIS}[application/x-research-info-systems]{RIS entry of this paper}{RIS}}%
}{%
\typeout{No file \AA@bibRIS{} found. Reference in RIS format not embedded.}%
}%
\end{itemize*}\\
}{%
\PackageError{authorarchive}{No bibliographic files found. Specify option 'nobib' if this is intended.}
}
\fi
}
}
\authorat{\raisebox{\the\ht\AA@authoratBox}{\usebox{\AA@authoratBox}}}
}
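%%
%% Example usage (an illustrative sketch only; all option values below are
%% placeholders, and the option set mirrors the keys processed above):
%%   \usepackage[IEEE, key=author.ea-title-2024, year=2024,
%%     publication={Proceedings of an Example Conference},
%%     startpage={1}, doi={10.0000/000000}]{authorarchive}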

10 binary image files not shown (sizes 14 KiB to 96 KiB); one image diff suppressed because it is too large (57 KiB).
View File

@ -1,327 +0,0 @@
%% Copyright (C) 2018 The University of Sheffield
%% 2018-2021 The University of Paris-Saclay
%% 2019-2021 The University of Exeter
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1.3c of the License, or (at your option) any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
\usepackage{listings}
\usepackage{listingsutf8}
\usepackage{tikz}
\usepackage[many]{tcolorbox}
\tcbuselibrary{listings}
\tcbuselibrary{skins}
\usepackage{xstring}
\definecolor{OliveGreen} {cmyk}{0.64,0,0.95,0.40}
\definecolor{BrickRed} {cmyk}{0,0.89,0.94,0.28}
\definecolor{Blue} {cmyk}{1,1,0,0}
\definecolor{CornflowerBlue}{cmyk}{0.65,0.13,0,0}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <antiquotations>
%% Hack: redefine tag types to support highlighting of antiquotations
\gdef\lst@tagtypes{s}
\gdef\lst@TagKey#1#2{%
\lst@Delim\lst@tagstyle #2\relax
{Tag}\lst@tagtypes #1%
{\lst@BeginTag\lst@EndTag}%
\@@end\@empty{}}
\lst@Key{tag}\relax{\lst@TagKey\@empty{#1}}
\lst@Key{tagstyle}{}{\def\lst@tagstyle{#1}}
\lst@AddToHook{EmptyStyle}{\let\lst@tagstyle\@empty}
\gdef\lst@BeginTag{%
\lst@DelimOpen
\lst@ifextags\else
{\let\lst@ifkeywords\iftrue
\lst@ifmarkfirstintag \lst@firstintagtrue \fi}}
\lst@AddToHookExe{ExcludeDelims}{\let\lst@ifextags\iffalse}
\gdef\lst@EndTag{\lst@DelimClose\lst@ifextags\else}
\lst@Key{usekeywordsintag}t[t]{\lstKV@SetIf{#1}\lst@ifusekeysintag}
\lst@Key{markfirstintag}f[t]{\lstKV@SetIf{#1}\lst@ifmarkfirstintag}
\gdef\lst@firstintagtrue{\global\let\lst@iffirstintag\iftrue}
\global\let\lst@iffirstintag\iffalse
\lst@AddToHook{PostOutput}{\lst@tagresetfirst}
\lst@AddToHook{Output}
{\gdef\lst@tagresetfirst{\global\let\lst@iffirstintag\iffalse}}
\lst@AddToHook{OutputOther}{\gdef\lst@tagresetfirst{}}
\lst@AddToHook{Output}
{\ifnum\lst@mode=\lst@tagmode
\lst@iffirstintag \let\lst@thestyle\lst@gkeywords@sty \fi
\lst@ifusekeysintag\else \let\lst@thestyle\lst@gkeywords@sty\fi
\fi}
\lst@NewMode\lst@tagmode
\gdef\lst@Tag@s#1#2\@empty#3#4#5{%
\lst@CArg #1\relax\lst@DefDelimB {}{}%
{\ifnum\lst@mode=\lst@tagmode \expandafter\@gobblethree \fi}%
#3\lst@tagmode{#5}%
\lst@CArg #2\relax\lst@DefDelimE {}{}{}#4\lst@tagmode}%
\gdef\lst@BeginCDATA#1\@empty{%
\lst@TrackNewLines \lst@PrintToken
\lst@EnterMode\lst@GPmode{}\let\lst@ifmode\iffalse
\lst@mode\lst@tagmode #1\lst@mode\lst@GPmode\relax\lst@modetrue}
%
\def\beginlstdelim#1#2#3%
{%
\def\endlstdelim{\texttt{\textbf{\color{black!60}#2}}\egroup}%
\ttfamily\textbf{\color{black!60}#1}\bgroup\rmfamily\color{#3}\aftergroup\endlstdelim%
}
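%% Example (illustrative sketch): with the tag keys defined above, a listings
%% style can highlight Isabelle antiquotations of the form @{...}; cf. the
%% 'sml' style later in this file:
%%   \lstdefinestyle{demo}{language=ML,tag=**[s]{@\{}{\}},
%%     tagstyle=\color{CornflowerBlue},markfirstintag=true}
%%   \begin{lstlisting}[style=demo]
%%   val t = @{term "x + y"}
%%   \end{lstlisting}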
%% </antiquotations>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <isar>
\providecolor{isar}{named}{blue}
\renewcommand{\isacommand}[1]{\textcolor{OliveGreen!60}{\ttfamily\bfseries #1}}
\newcommand{\inlineisarbox}[1]{#1}
\NewTColorBox[]{isarbox}{}{
,boxrule=0pt
,boxsep=0pt
,colback=white!90!isar
,enhanced jigsaw
,borderline west={2pt}{0pt}{isar!60!black}
,sharp corners
%,before skip balanced=0.5\baselineskip plus 2pt % works only with Tex Live 2020 and later
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=isar!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {Isar};}
}
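%% Example usage (illustrative sketch): typesets an Isar snippet in the box
%% defined above, labelled "Isar" in its top-right corner:
%%   \begin{isarbox}
%%     \isacommand{lemma} example: ``x = x'' \isacommand{by} simp
%%   \end{isarbox}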
%% </isar>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <out>
\providecolor{out}{named}{green}
\newtcblisting{out}[1][]{%
listing only%
,boxrule=0pt
,boxsep=0pt
,colback=white!90!out
,enhanced jigsaw
,borderline west={2pt}{0pt}{out!60!black}
,sharp corners
% ,before skip=10pt
% ,after skip=10pt
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=out!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {Document};}
,listing options={
breakatwhitespace=true
,columns=flexible%
,basicstyle=\small\rmfamily
,mathescape
,#1
}
}%
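%% Example usage (illustrative sketch): shows generated document text in the
%% box labelled "Document"; mathescape is enabled, so $...$ works inside:
%%   \begin{out}
%%   For all $n$, we have $n \leq n + 1$.
%%   \end{out}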
%% </out>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <sml>
\lstloadlanguages{ML}
\providecolor{sml}{named}{red}
\lstdefinestyle{sml}{
,escapechar=ë%
,basicstyle=\ttfamily%
,commentstyle=\itshape%
,keywordstyle=\bfseries\color{CornflowerBlue}%
,ndkeywordstyle=\color{green}%
,language=ML
% ,literate={%
% {<@>}{@}1%
% }
,keywordstyle=[6]{\itshape}%
,morekeywords=[6]{args_type}%
,tag=**[s]{@\{}{\}}%
,tagstyle=\color{CornflowerBlue}%
,markfirstintag=true%
}%
\def\inlinesml{\lstinline[style=sml,breaklines=true,breakatwhitespace=true]}
\newtcblisting{sml}[1][]{%
listing only%
,boxrule=0pt
,boxsep=0pt
,colback=white!90!sml
,enhanced jigsaw
,borderline west={2pt}{0pt}{sml!60!black}
,sharp corners
% ,before skip=10pt
% ,after skip=10pt
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=sml!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {SML};}
,listing options={
style=sml
,columns=flexible%
,basicstyle=\small\ttfamily
,#1
}
}%
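%% Example usage (illustrative sketch): an SML listing in the box labelled
%% "SML", plus the corresponding inline form:
%%   \begin{sml}
%%   fun add x y = x + y
%%   \end{sml}
%%   ... where \inlinesml{add 1 2} evaluates to 3 ...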
%% </sml>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <latex>
\lstloadlanguages{TeX}
\providecolor{ltx}{named}{yellow}
\lstdefinestyle{lltx}{language=[AlLaTeX]TeX,
,basicstyle=\ttfamily%
,showspaces=false%
,escapechar=ë
,showlines=false%
,morekeywords={newisadof}
% ,keywordstyle=\bfseries%
% Defining 2-keywords
,keywordstyle=[1]{\color{BrickRed!60}\bfseries}%
% Defining 3-keywords
,keywordstyle=[2]{\color{OliveGreen!60}\bfseries}%
% Defining 4-keywords
,keywordstyle=[3]{\color{black!60}\bfseries}%
% Defining 5-keywords
,keywordstyle=[4]{\color{Blue!70}\bfseries}%
% Defining 6-keywords
,keywordstyle=[5]{\itshape}%
%
}
\lstdefinestyle{ltx}{style=lltx,
basicstyle=\ttfamily\small}%
\def\inlineltx{\lstinline[style=ltx, breaklines=true,columns=fullflexible]}
% see
% https://tex.stackexchange.com/questions/247643/problem-with-tcblisting-first-listed-latex-command-is-missing
\NewTCBListing{ltx}{ !O{} }{%
listing only%
,boxrule=0pt
,boxsep=0pt
,colback=white!90!ltx
,enhanced jigsaw
,borderline west={2pt}{0pt}{ltx!60!black}
,sharp corners
% ,before skip=10pt
% ,after skip=10pt
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=ltx!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {\LaTeX};}
,listing options={
style=lltx,
,columns=flexible%
,basicstyle=\small\ttfamily
,#1
}
}%
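%% Example usage (illustrative sketch): a LaTeX listing in the box labelled
%% with the LaTeX logo; 'newisadof' is highlighted via morekeywords above:
%%   \begin{ltx}
%%   \newisadof{example}[label=,type=][1]{#1}
%%   \end{ltx}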
%% </latex>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <bash>
\providecolor{bash}{named}{black}
\lstloadlanguages{bash}
\lstdefinestyle{bash}{%
language=bash
,escapechar=ë
,basicstyle=\ttfamily%
,showspaces=false%
,showlines=false%
,columns=flexible%
% ,keywordstyle=\bfseries%
% Defining 2-keywords
,keywordstyle=[1]{\color{BrickRed!60}\bfseries}%
% Defining 3-keywords
,keywordstyle=[2]{\color{OliveGreen!60}\bfseries}%
% Defining 4-keywords
,keywordstyle=[3]{\color{black!60}\bfseries}%
% Defining 5-keywords
,keywordstyle=[4]{\color{Blue!80}\bfseries}%
,alsoletter={*,-,:,~,/}
,morekeywords=[4]{}%
% Defining 6-keywords
,keywordstyle=[5]{\itshape}%
%
}
\def\inlinebash{\lstinline[style=bash, breaklines=true,columns=fullflexible]}
\newcommand\@isabsolutepath[3]{%
\StrLeft{#1}{1}[\firstchar]%
\IfStrEq{\firstchar}{/}{#2}{#3}%
}
\newcommand{\@homeprefix}[1]{%
\ifthenelse{\equal{#1}{}}{\textasciitilde}{\textasciitilde/}%
}
\newcommand{\prompt}[1]{%
\color{Blue!80}\textbf{\texttt{%
achim@logicalhacking:{\@isabsolutepath{#1}{#1}{\@homeprefix{#1}#1}}\$}}%
}
\newtcblisting{bash}[1][]{%
listing only%
,boxrule=0pt
,boxsep=0pt
,colback=white!90!bash
,enhanced jigsaw
,borderline west={2pt}{0pt}{bash!60!black}
,sharp corners
% ,before skip=10pt
% ,after skip=10pt
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=bash!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {Bash};}
,listing options={
style=bash
,columns=flexible%
,breaklines=true%
,prebreak=\mbox{\space\textbackslash}%
,basicstyle=\small\ttfamily%
,#1
}
}%
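%% Example usage (illustrative sketch; the command shown is only an example):
%% the escape character ë embeds the \prompt macro defined above:
%%   \begin{bash}
%%   ë\prompt{}ë isabelle components -u .
%%   \end{bash}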
%% </bash>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% <config>
\providecolor{config}{named}{gray}
\newtcblisting{config}[2][]{%
listing only%
,boxrule=0pt
,boxsep=0pt
,colback=white!90!config
,enhanced jigsaw
,borderline west={2pt}{0pt}{config!60!black}
,sharp corners
% ,before skip=10pt
% ,after skip=10pt
,enlarge top by=0mm
,enhanced
,overlay={\node[draw,fill=config!60!black,xshift=0pt,anchor=north
east,font=\bfseries\footnotesize\color{white}]
at (frame.north east) {#2};}
,listing options={
breakatwhitespace=true
,columns=flexible%
,basicstyle=\small\ttfamily
,mathescape
,#1
}
}%
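%% Example usage (illustrative sketch): the mandatory argument sets the box
%% title, e.g. for an Isabelle ROOT file:
%%   \begin{config}{ROOT}
%%   session "Example" = "Isabelle_DOF" +
%%     options [document = pdf, document_build = dof]
%%   \end{config}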
%% </config>
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

View File

@ -1,46 +0,0 @@
%% Copyright (C) 2018 The University of Sheffield
%% 2018 The University of Paris-Saclay
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1 of the License, or any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.0+ OR BSD-2-Clause
%% This is a placeholder for user-specific configuration and packages.
\IfFileExists{beramono.sty}{\usepackage[scaled=0.88]{beramono}}{}%
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}%
\usepackage{textcomp}
\usepackage{xcolor}
\usepackage{paralist}
\usepackage{listings}
\usepackage{lstisadof-manual}
\providecommand{\isactrlemph}[1]{\emph{#1}}
\usepackage[LNCS,
orcidicon,
key=brucker.ea-isabelle-ontologies-2018,
year=2018,
publication={F. Rabe et al. (Eds.): CICM 2018, LNAI 11006},
nobib,
startpage={1},
doi={10.1007/978-3-319-96812-4_3},
doiText={10.1007/978-3-319-96812-4\_3},
]{authorarchive}
\authorrunning{A. D. Brucker et al.}
\pagestyle{headings}
\title{<TITLE>}
\author{<AUTHOR>}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "root.tex"
%%% End:

View File

@ -1,299 +0,0 @@
@STRING{pub-springer={Springer} }
@STRING{pub-springer:adr=""}
@STRING{s-lncs = "LNCS" }
@Manual{ wenzel:isabelle-isar:2017,
title = {The Isabelle/Isar Reference Manual},
author = {Makarius Wenzel},
OPTorganization = {},
OPTaddress = {},
OPTedition = {},
OPTmonth = {},
year = {2017},
note = {Part of the Isabelle distribution.},
OPTannote = {}
}
@Book{ adler:r:2010,
abstract = {Presents a guide to the R computer language, covering such
topics as the user interface, packages, syntax, objects,
functions, object-oriented programming, data sets, lattice
graphics, regression models, and bioconductor.},
added-at = {2013-01-10T22:39:38.000+0100},
address = {Sebastopol, CA},
author = {Adler, Joseph},
isbn = {9780596801700 059680170X},
keywords = {R},
publisher = {O'Reilly},
refid = 432987461,
title = {R in a nutshell},
year = 2010
}
@InCollection{ wenzel.ea:building:2007,
abstract = {We present the generic system framework of
Isabelle/Isar underlying recent versions of Isabelle. Among
other things, Isar provides an infrastructure for Isabelle
plug-ins, comprising extensible state components and
extensible syntax that can be bound to tactical ML
programs. Thus the Isabelle/Isar architecture may be
understood as an extension and refinement of the
traditional LCF approach, with explicit infrastructure for
building derivative systems. To demonstrate the technical
potential of the framework, we apply it to a concrete
formal methods tool: the HOL-Z 3.0 environment, which is
geared towards the analysis of Z specifications and formal
proof of forward-refinements.},
author = {Makarius Wenzel and Burkhart Wolff},
booktitle = {TPHOLs 2007},
editor = {Klaus Schneider and Jens Brandt},
language = {USenglish},
acknowledgement={none},
pages = {352--367},
publisher = pub-springer,
address = pub-springer:adr,
number = 4732,
series = s-lncs,
title = {Building Formal Method Tools in the {Isabelle}/{Isar}
Framework},
doi = {10.1007/978-3-540-74591-4_26},
year = 2007
}
@Misc{w3c:ontologies:2015,
  title={Ontologies},
  organization={W3C},
  url={https://www.w3.org/standards/semanticweb/ontology},
  year=2018
}
@Book{boulanger:cenelec-50128:2015,
  author = "Boulanger, Jean-Louis",
  title = "{CENELEC} 50128 and {IEC} 62279 Standards",
  publisher = "Wiley-ISTE",
  year = "2015",
  address = "Boston",
  note = "The reference on the standard."
}
@Booklet{ cc:cc-part3:2006,
bibkey = {cc:cc-part3:2006},
key = {Common Criteria},
institution = {Common Criteria},
language = {USenglish},
month = sep,
year = 2006,
public = {yes},
title = {Common Criteria for Information Technology Security
Evaluation (Version 3.1), {Part} 3: Security assurance
components},
note = {Available as document
\href{http://www.commoncriteriaportal.org/public/files/CCPART3V3.1R1.pdf}
{CCMB-2006-09-003}},
number = {CCMB-2006-09-003},
acknowledgement={brucker, 2007-04-24}
}
@Book{ nipkow.ea:isabelle:2002,
author = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
title = {Isabelle/HOL---A Proof Assistant for Higher-Order
Logic},
publisher = pub-springer,
address = pub-springer:adr,
series = s-lncs,
volume = 2283,
doi = {10.1007/3-540-45949-9},
abstract = {This book is a self-contained introduction to interactive
proof in higher-order logic HOL, using the proof
assistant Isabelle2002. It is a tutorial for potential
users rather than a monograph for researchers. The book has
three parts.
1. Elementary Techniques shows how to model functional
programs in higher-order logic. Early examples involve
lists and the natural numbers. Most proofs are two steps
long, consisting of induction on a chosen variable followed
by the auto tactic. But even this elementary part covers
such advanced topics as nested and mutual recursion. 2.
Logic and Sets presents a collection of lower-level tactics
that you can use to apply rules selectively. It also
describes Isabelle/HOL's treatment of sets, functions
and relations and explains how to define sets inductively.
One of the examples concerns the theory of model checking,
and another is drawn from a classic textbook on formal
languages. 3. Advanced Material describes a variety of
other topics. Among these are the real numbers, records and
overloading. Advanced techniques are described involving
induction and recursion. A whole chapter is devoted to an
extended example: the verification of a security protocol. },
year = 2002,
acknowledgement={brucker, 2007-02-19},
bibkey = {nipkow.ea:isabelle:2002},
tags = {noTAG},
clearance = {unclassified},
timestamp = {2008-05-26}
}
@InProceedings{ wenzel:asynchronous:2014,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in
Isabelle/{PIDE}},
booktitle = {Interactive Theorem Proving (ITP)},
pages = {515--530},
year = 2014,
crossref = {klein.ea:interactive:2014},
doi = {10.1007/978-3-319-08970-6_33},
timestamp = {Sun, 21 May 2017 00:18:59 +0200},
abstract = { Historically, the LCF tradition of interactive theorem
proving was tied to the read-eval-print loop, with
sequential and synchronous evaluation of prover commands
given on the command-line. This user-interface technology
was adequate when R. Milner introduced his LCF proof
assistant in the 1970s, but it severely limits the
potential of current multicore hardware and advanced IDE
front-ends.
Isabelle/PIDE breaks this loop and retrofits the
read-eval-print phases into an asynchronous model of
document-oriented proof processing. Instead of feeding a
sequence of individual commands into the prover process,
the primary interface works via edits over a family of
document versions. Execution is implicit and managed by the
prover on its own account in a timeless and stateless
manner. Various aspects of interactive proof checking are
scheduled according to requirements determined by the
front-end perspective on the proof document, while making
adequate use of the CPU resources on multicore hardware on
the back-end.
Recent refinements of Isabelle/PIDE provide an explicit
concept of asynchronous print functions over existing proof
states. This allows the integration of long-running or
potentially non-terminating tools into the document-model.
Applications range from traditional proof state output
(which may consume substantial time in interactive
development) to automated provers and dis-provers that
report on existing proof document content (e.g.
Sledgehammer, Nitpick, Quickcheck in Isabelle/HOL).
Moreover, it is possible to integrate query operations via
additional GUI panels with separate input and output (e.g.
for Sledgehammer or find-theorems). Thus the Prover IDE
provides continuous proof processing, augmented by add-on
tools that help the user to continue writing proofs.
}
}
@Proceedings{ klein.ea:interactive:2014,
editor = {Gerwin Klein and Ruben Gamboa},
title = {Interactive Theorem Proving - 5th International
Conference, {ITP} 2014, Held as Part of the Vienna Summer
of Logic, {VSL} 2014, Vienna, Austria, July 14-17, 2014.
Proceedings},
series = s-lncs,
volume = 8558,
publisher = pub-springer,
year = 2014,
doi = {10.1007/978-3-319-08970-6},
isbn = {978-3-319-08969-0}
}
@InProceedings{ bezzecchi.ea:making:2018,
title = {Making Agile Development Processes fit for V-style
Certification Procedures},
author = {Bezzecchi, S. and Crisafulli, P. and Pichot, C. and Wolff,
B.},
booktitle = {{ERTS'18}},
abstract = {We present a process for the development of safety and
security critical components in transportation systems
targeting a high-level certification (CENELEC 50126/50128,
DO 178, CC ISO/IEC 15408).
The process adheres to the objectives of an ``agile
development'' in terms of evolutionary flexibility and
continuous improvement. Yet, it enforces the overall
coherence of the development artifacts (ranging from proofs
over tests to code) by a particular environment (CVCE).
In particular, the validation process is built around a
formal development based on the interactive theorem proving
system Isabelle/HOL, by linking the business logic of the
application to the operating system model, down to code and
concrete hardware models thanks to a series of refinement
proofs.
We apply both the process and its support in CVCE to a
case-study that comprises a model of an odometric service
in a railway-system with its corresponding implementation
integrated in seL4 (a secure kernel for which a
comprehensive Isabelle development exists). Novel
techniques implemented in Isabelle enforce the coherence of
semi-formal and formal definitions with respect to specific
certification processes in order to improve their
cost-effectiveness. },
pdf = {https://www.lri.fr/~wolff/papers/conf/2018erts-agile-fm.pdf},
year = 2018,
series = {ERTS Conference Proceedings},
location = {Toulouse}
}
@Misc{owl2012,
  title = {OWL 2 Web Ontology Language},
  note = {\url{https://www.w3.org/TR/owl2-overview/}, Document Overview (Second Edition)},
  author = {World Wide Web Consortium},
  year = {2012}
}
@Misc{ protege,
title = {Prot{\'e}g{\'e}},
note={\url{https://protege.stanford.edu}},
year = {2018}
}
@Misc{ cognitum,
title = {Fluent Editor},
note={\url{http://www.cognitum.eu/Semantics/FluentEditor/}},
year = {2018}
}
@Misc{ neon,
title = {The NeOn Toolkit},
note = {\url{http://neon-toolkit.org}},
year = {2018}
}
@Misc{ owlgred,
title = {OWLGrEd},
note={\url{http://owlgred.lumii.lv/}},
year = {2018}
}
@Misc{ rontorium,
title = {R Language Package for FLuent Editor (rOntorion)},
note={\url{http://www.cognitum.eu/semantics/FluentEditor/rOntorionFE.aspx}},
year = {2018}
}
@InCollection{brucker.ea:isabelle-ontologies:2018,
abstract = {While Isabelle is mostly known as part of Isabelle/HOL (an interactive theorem prover), it actually provides a framework for developing a wide spectrum of applications. A particular strength of the Isabelle framework is the combination of text editing, formal verification, and code generation.\\\\Up to now, Isabelle's document preparation system lacks a mechanism for ensuring the structure of different document types (as, e.g., required in certification processes) in general and, in particular, mechanism for linking informal and formal parts of a document.\\\\In this paper, we present Isabelle/DOF, a novel Document Ontology Framework on top of Isabelle. Isabelle/DOF allows for conventional typesetting \emph{as well} as formal development. We show how to model document ontologies inside Isabelle/DOF, how to use the resulting meta-information for enforcing a certain document structure, and discuss ontology-specific IDE support.},
address = {Heidelberg},
author = {Achim D. Brucker and Idir Ait-Sadoune and Paolo Crisafulli and Burkhart Wolff},
booktitle = {Conference on Intelligent Computer Mathematics (CICM)},
doi = {10.1007/978-3-319-96812-4_3},
keywords = {Isabelle/Isar, HOL, Ontologies},
language = {USenglish},
location = {Hagenberg, Austria},
number = {11006},
pdf = {https://www.brucker.ch/bibliography/download/2018/brucker.ea-isabelle-ontologies-2018.pdf},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {Using The Isabelle Ontology Framework: Linking the Formal with the Informal},
url = {https://www.brucker.ch/bibliography/abstract/brucker.ea-isabelle-ontologies-2018},
year = {2018}
}

View File

@ -1,9 +0,0 @@
chapter AFP
session "Isabelle_DOF-Example-II" (AFP) = "Isabelle_DOF" +
options [document = pdf, document_output = "output", document_build = dof, timeout = 300]
theories
"paper"
document_files
"root.bib"
"preamble.tex"

View File

@ -1,10 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.
\usepackage{stmaryrd}
\usepackage{pifont}% http://ctan.org/pkg/pifont
\title{<TITLE>}
\author{<AUTHOR>}

File diff suppressed because it is too large.

View File

@ -1,982 +0,0 @@
(*<*)
theory "paper"
imports "Isabelle_DOF.scholarly_paper"
begin
use_template "scrartcl"
use_ontology "scholarly_paper"
open_monitor*[this::article]
declare[[ strict_monitor_checking = false]]
declare[[ Definition_default_class = "definition"]]
declare[[ Lemma_default_class = "lemma"]]
declare[[ Theorem_default_class = "theorem"]]
declare[[ Corollary_default_class = "corollary"]]
define_shortcut* csp \<rightleftharpoons> \<open>CSP\<close>
holcsp \<rightleftharpoons> \<open>HOL-CSP\<close>
isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
hfill \<rightleftharpoons> \<open>\hfill\<close>
br \<rightleftharpoons> \<open>\break\<close>
(*>*)
title*[tit::title]\<open>Philosophers may Dine - Definitively!\<close>
author*[safouan,email="\<open>safouan.taha@lri.fr\<close>",affiliation="\<open>LRI, CentraleSupelec\<close>"]\<open>Safouan Taha\<close>
author*[bu,email= "\<open>wolff@lri.fr\<close>",affiliation = "\<open>LRI, Université Paris-Saclay\<close>"]\<open>Burkhart Wolff\<close>
author*[lina,email="\<open>lina.ye@lri.fr\<close>",affiliation="\<open>LRI, Inria, LSV, CentraleSupelec\<close>"]\<open>Lina Ye\<close>
abstract*[abs, keywordlist="[\<open>Shallow Embedding\<close>,\<open>Process-Algebra\<close>,
\<open>Concurrency\<close>,\<open>Computational Models\<close>]"]
\<open> The theory of Communicating Sequential Processes going back to Hoare and Roscoe is still today
one of the reference theories for concurrent specification and computing. In 1997, a first
formalization in \<^isabelle> of the denotational semantics of the Failure/Divergence Model of
\<^csp> was undertaken; in particular, this model can cope with infinite alphabets, in contrast
to model-checking approaches limited to finite ones.
In this paper, we extend this theory to a significant degree by taking advantage of the more
powerful automation of modern Isabelle versions, which has come even closer to recent developments
in the semantic foundation of \<^csp>.
More importantly, we use this formal development to analyse a family of refinement notions,
comprising classic and new ones. This analysis enabled us to derive a number of properties
that deepen the understanding of these notions, in particular with respect to
specification decomposition principles in the infinite case. Better definitions allow us to
clarify a number of obscure points in the classical literature, for example concerning the
relationship between deadlock- and livelock-freeness.
As a result, we have a modern environment for formal proofs of concurrent systems that allows us
to combine general infinite processes with locally finite ones in a logically safe way.
We demonstrate a number of resulting verification techniques on classical, generalized examples:
the CopyBuffer and Dijkstra's Dining Philosophers problem of arbitrary size.
If you consider citing this paper, please refer to @{cite "HOL-CSP-iFM2020"}.
\<close>
text\<open>\<close>
section*[introheader::introduction,main_author="Some(@{author ''bu''}::author)"]\<open> Introduction \<close>
text*[introtext::introduction, level="Some 1"]\<open>
Communicating Sequential Processes (\<^csp>) is a language to specify and verify patterns of
interaction of concurrent systems. Together with CCS and LOTOS, it belongs to the family of
\<^emph>\<open>process algebras\<close>. \<^csp>'s rich theory comprises denotational, operational and algebraic semantic
facets and has influenced programming languages such as Limbo, Crystal, Clojure and most notably
Golang @{cite "donovan2015go"}. \<^csp> has been applied in industry as a tool for specifying and
verifying the concurrent aspects of hardware systems, such as the T9000 transputer
@{cite "Barret95"}.
The theory of \<^csp> was first described by Tony Hoare in 1978 and later consolidated in his book @{cite "Hoare:1985:CSP:3921"},
but has since evolved substantially @{cite "BrookesHR84" and "brookes-roscoe85" and "roscoe:csp:1998"}.
\<^csp> describes the most common communication and synchronization mechanisms with one single language
primitive: synchronous communication written \<open>_\<lbrakk>_\<rbrakk>_\<close>. \<^csp> semantics is described by a fully abstract
model of behaviour designed to be \<^emph>\<open>compositional\<close>: the denotational semantics of a process \<open>P\<close>
encompasses all possible behaviours of this process in the context of all possible environments
\<open>P \<lbrakk>S\<rbrakk> Env\<close> (where \<open>S\<close> is the set of \<open>atomic events\<close> both \<open>P\<close> and \<open>Env\<close> must synchronize). This
design objective has the consequence that two kinds of choice have to be distinguished: \<^vs>\<open>0.1cm\<close>
\<^enum> the \<^emph>\<open>external choice\<close>, written \<open>_\<box>_\<close>, which forces a process "to follow" whatever
the environment offers, and \<^vs>\<open>-0.4cm\<close>
\<^enum> the \<^emph>\<open>internal choice\<close>, written \<open>_\<sqinter>_\<close>, which imposes on the environment of a process
"to follow" the non-deterministic choices made.\<^vs>\<open>0.3cm\<close>
\<close>
text\<open> \<^vs>\<open>-0.6cm\<close>
Generalizations of these two operators \<open>\<box>x\<in>A. P(x)\<close> and \<open>\<Sqinter>x\<in>A. P(x)\<close> allow for modeling the concepts
of \<^emph>\<open>input\<close> and \<^emph>\<open>output\<close>: Based on the prefix operator \<open>a\<rightarrow>P\<close> (event \<open>a\<close> happens, then the process
proceeds with \<open>P\<close>), receiving input is modeled by \<open>\<box>x\<in>A. x\<rightarrow>P(x)\<close> while sending output is represented
by \<open>\<Sqinter>x\<in>A. x\<rightarrow>P(x)\<close>. Setting choice in the center of the language semantics implies that
deadlock-freeness becomes a vital property for the well-formedness of a process, nearly as vital
as type-checking: Consider two events \<open>a\<close> and \<open>b\<close> not involved in a process \<open>P\<close>, then
\<open>(a\<rightarrow>P \<box> b\<rightarrow>P) \<lbrakk>{a,b}\<rbrakk> (a\<rightarrow>P \<sqinter> b\<rightarrow>P)\<close> is deadlock free provided \<open>P\<close> is, while
\<open>(a\<rightarrow>P \<sqinter> b\<rightarrow>P) \<lbrakk>{a,b}\<rbrakk> (a\<rightarrow>P \<sqinter> b\<rightarrow>P)\<close> deadlocks (both processes can "ruthlessly" make opposite choices,
but are required to synchronize).
Verification of \<^csp> properties has been centered around the notion of \<^emph>\<open>process refinement orderings\<close>,
most notably \<open>_\<sqsubseteq>\<^sub>F\<^sub>D_\<close> and \<open>_\<sqsubseteq>_\<close>. The latter turns the denotational domain of \<^csp> into a Scott cpo
@{cite "scott:cpo:1972"}, which yields semantics for the fixed point operator \<open>\<mu>x. f(x)\<close> provided
that \<open>f\<close> is continuous with respect to \<open>_\<sqsubseteq>_\<close>. Since it is possible to express deadlock-freeness and
livelock-freeness as a refinement problem, the verification of properties has been reduced
traditionally to a model-checking problem for a finite set of events \<open>A\<close>.
We are interested in verification techniques for arbitrary event sets \<open>A\<close> or arbitrarily
parameterized processes. Such processes can be used to model dense-timed processes, processes
with dynamic thread creation, and processes with unbounded thread-local variables and buffers.
However, this adds substantial complexity to the process theory: when it comes to study the
interplay of different denotational models, refinement-orderings, and side-conditions for
continuity, paper-and-pencil proofs easily reach their limits of precision.
Several attempts have been undertaken to develop a formal theory in an interactive proof system,
mostly in Isabelle/HOL @{cite "Camilleri91" and "tej.ea:corrected:1997" and "IsobeRoggenbach2010"
and "DBLP:journals/afp/Noce16"}.
This paper is based on @{cite "tej.ea:corrected:1997"}, which has been the most comprehensive
attempt to formalize denotational \<^csp> semantics covering a part of Bill Roscoe's Book
@{cite "roscoe:csp:1998"}. Our contributions are as follows:
\<^item> we ported @{cite "tej.ea:corrected:1997"} from Isabelle93-7 and ancient
ML-written proof scripts to a modern Isabelle/HOL version and structured Isar proofs,
and extended it substantially,
\<^item> we introduced new refinement notions allowing a deeper understanding of the \<^csp>
Failure/Divergence model, providing some meta-theoretic clarifications,
\<^item> we used our framework to derive new types of decomposition rules and
stronger induction principles based on the new refinement notions, and
\<^item> we integrate this machinery into a number of advanced verification techniques, which we
apply to two generalized paradigmatic examples in the \<^csp> literature,
the CopyBuffer and Dining Philosophers@{footnote \<open>All proofs concerning the
HOL-CSP 2 core have been published in the Archive of Formal Proofs @{cite "HOL-CSP-AFP"};
all other proofs are available at
\<^url>\<open>https://gitlri.lri.fr/burkhart.wolff/hol-csp2.0\<close>. In this paper, all Isabelle proofs are
omitted.\<close>}.
\<close>
section*["pre"::technical,main_author="Some(@{author \<open>bu\<close>}::author)"]
\<open>Preliminaries\<close>
subsection*[cspsemantics::technical, main_author="Some(@{author ''bu''})"]\<open>Denotational \<^csp> Semantics\<close>
text\<open> The denotational semantics (following @{cite "roscoe:csp:1998"}) comes in three layers:
the \<^emph>\<open>trace model\<close>, the \<^emph>\<open>(stable) failures model\<close> and the \<^emph>\<open>failure/divergence model\<close>.
In the trace semantics model, a process \<open>P\<close> is denoted by a set of communication traces,
built from atomic events. A trace here represents a partial history of the communication
sequence occurring when a process interacts with its environment. For the two basic \<^csp>
processes \<open>Skip\<close> (successful termination) and \<open>Stop\<close> (just deadlock), the semantic function
\<open>\<T>\<close> of the trace model gives for both the same denotation, \<^ie> the set containing just the empty trace:
\<open>\<T>(Skip) = \<T>(Stop) = {[]}\<close>.
Note that trace sets, representing all \<^emph>\<open>partial\<close> histories, are in general prefix-closed.\<close>
text*[ex1::math_example, status=semiformal, level="Some 1"] \<open>
Let two processes be defined as follows:\<^vs>\<open>0.2cm\<close>
\<^enum> \<open>P\<^sub>d\<^sub>e\<^sub>t = (a \<rightarrow> Stop) \<box> (b \<rightarrow> Stop)\<close>
\<^enum> \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t = (a \<rightarrow> Stop) \<sqinter> (b \<rightarrow> Stop)\<close>
\<close>
text\<open>These two processes \<open>P\<^sub>d\<^sub>e\<^sub>t\<close> and \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t\<close> cannot be distinguished by using
the trace semantics: \<open>\<T>(P\<^sub>d\<^sub>e\<^sub>t) = \<T>(P\<^sub>n\<^sub>d\<^sub>e\<^sub>t) = {[],[a],[b]}\<close>. To resolve this problem, Brookes @{cite "BrookesHR84"}
proposed the failures model, where communication traces were augmented with the
constraint information for further communication that is represented negatively as a refusal set.
A failure \<open>(t, X)\<close> is a pair of a trace \<open>t\<close> and a set of events \<open>X\<close> that a process can refuse if
any of the events in \<open>X\<close> were offered to him by the environment after performing the trace \<open>t\<close>.
The semantic function \<open>\<F>\<close> in the failures model maps a process to a set of such failures.
Let \<open>\<Sigma>\<close> be the set of events. Then, \<open>{([],\<Sigma>)} \<subseteq> \<F> Stop\<close> as the process \<open>Stop\<close> refuses all events.
For Example 1, we have \<open>{([],\<Sigma>\{a,b}),([a],\<Sigma>),([b],\<Sigma>)} \<subseteq> \<F> P\<^sub>d\<^sub>e\<^sub>t\<close>, while
\<open>{([],\<Sigma>\{a}),([],\<Sigma>\{b}),([a],\<Sigma>),([b],\<Sigma>)} \<subseteq> \<F> P\<^sub>n\<^sub>d\<^sub>e\<^sub>t\<close> (the \<open>_\<subseteq>_\<close> refers to the fact that
the refusals must be downward closed; we show only the maximal refusal sets here).
Thus, internal and external choice, also called \<^emph>\<open>nondeterministic\<close> and \<^emph>\<open>deterministic\<close>
choice, can be distinguished in the failures semantics.
However, it turns out that the failures model suffers from another deficiency with respect to
the phenomenon called infinite internal chatter or \<^emph>\<open>divergence\<close>.\<close>
text*[ex2::example, status=semiformal] \<open>
The following process \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> is an infinite process that performs \<open>a\<close> infinitely
many times. However, using the \<^csp> hiding operator \<open>_\_\<close>, this activity is concealed:
\<^enum> \<open>P\<^sub>i\<^sub>n\<^sub>f = (\<mu> X. a \<rightarrow> X) \ {a}\<close>
\<close>
text\<open>where \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> will be equivalent to \<open>\<bottom>\<close> in the process cpo ordering.
To distinguish divergences from the deadlock process, Brookes and Roscoe
proposed the failure/divergence model to incorporate divergence traces @{cite "brookes-roscoe85"}.
A divergence trace is one that leads to a possibly divergent behavior.
A well-behaved process should be able to respond to its environment in a finite amount of time.
Hence, divergences are considered a kind of catastrophe in this model.
Thus, a process is represented by a failure set \<open>\<F>\<close>,
together with a set of divergence traces \<open>\<D>\<close>;
in our example, the empty trace \<open>[]\<close> belongs to \<open>\<D> P\<^sub>i\<^sub>n\<^sub>f\<close>.
The failure/divergence model has become the standard semantics for an enormous range of \<^csp>
research and the implementations of @{cite "fdr4" and "SunLDP09"}. Note that the work
of @{cite "IsobeRoggenbach2010"} is restricted to a variant of the failures model only.
\<close>
subsection*["isabelleHol"::technical, main_author="Some(@{author ''bu''})"]\<open>Isabelle/HOL\<close>
text\<open> Nowadays, Isabelle/HOL is one of the major interactive theory development environments
@{cite "nipkow.ea:isabelle:2002"}. HOL stands for Higher-Order Logic, a logic based on simply-typed
\<open>\<lambda>\<close>-calculus extended by parametric polymorphism and Haskell-like type-classes.
Besides interactive and integrated automated proof procedures,
it offers code and documentation generators. Its structured proof language Isar is intensively used
in the plethora of work done and has been a key factor for the success of the Archive of Formal Proofs
(\<^url>\<open>https://www.isa-afp.org\<close>).
For the work presented here, one relevant construction is:
\<^item> \<^theory_text>\<open>typedef (\<alpha>\<^sub>1,...,\<alpha>\<^sub>n)t = E\<close>
It creates a fresh type that is isomorphic to a set \<open>E\<close> involving the types \<open>\<alpha>\<^sub>1,...,\<alpha>\<^sub>n\<close>.
Isabelle/HOL performs a number of syntactic checks for these constructions that guarantee the logical
consistency of the defined constants or types relative to the axiomatic basis of HOL. The system
distribution comes with rich libraries comprising Sets, Numbers, Lists, etc. which are built in this
"conservative" way.
For this work, a particular library called \<^theory_text>\<open>HOLCF\<close> is intensively used. It provides classical
domain theory for a particular type-class \<open>\<alpha>::pcpo\<close>, \<^ie> the class of types \<open>\<alpha>\<close> for which
\<^enum> a least element \<open>\<bottom>\<close> is defined, and
\<^enum> a complete partial order \<open>_\<sqsubseteq>_\<close> is defined.
For these types, \<^theory_text>\<open>HOLCF\<close> provides a fixed-point operator \<open>\<mu>X. f X\<close> as well as the
fixed-point induction and other (automated) proof infrastructure. Isabelle's type-inference can
automatically infer, for example, that if \<open>\<alpha>::pcpo\<close>, then \<open>(\<beta> \<Rightarrow> \<alpha>)::pcpo\<close>. \<close>
section*["csphol"::technical,main_author="Some(@{author ''bu''}::author)", level="Some 2"]
\<open>Formalising Denotational \<^csp> Semantics in HOL \<close>
subsection*["processinv"::technical, main_author="Some(@{author ''bu''})"]
\<open>Process Invariant and Process Type\<close>
text\<open> First, we need a slight revision of the concept
of \<^emph>\<open>trace\<close>: if \<open>\<Sigma>\<close> is the type of the atomic events (represented by a type variable), then
we need to extend this type by a special event \<open>\<checkmark>\<close> (called "tick") signaling termination.
Thus, traces have the type \<open>(\<Sigma>\<uplus>\<checkmark>)\<^sup>*\<close>, written \<open>\<Sigma>\<^sup>\<checkmark>\<^sup>*\<close>; since \<open>\<checkmark>\<close> may only occur at the end of a trace,
we need to define a predicate \<open>front\<^sub>-tickFree t\<close> that requires from traces that \<open>\<checkmark>\<close> can only occur
at the end.
Second, in the traditional literature, the semantic domain is implicitly described by 9 "axioms"
over the three semantic functions \<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close>.
Informally, these are:
\<^item> the initial trace of a process must be empty;
\<^item> any allowed trace must be \<open>front\<^sub>-tickFree\<close>;
\<^item> traces of a process are \<^emph>\<open>prefix-closed\<close>;
\<^item> a process can refuse all subsets of a refusal set;
\<^item> any event refused by a process after a trace \<open>s\<close> must be in a refusal set associated to \<open>s\<close>;
\<^item> acceptance of the tick after a trace \<open>s\<close> implies that all other events are refused;
\<^item> a divergence trace extended by any suffix is itself a divergence trace;
\<^item> once a process has diverged, it can engage in or refuse any sequence of events; and
\<^item> a trace ending with \<open>\<checkmark>\<close> that belongs to the divergence set implies that its
maximal prefix without \<open>\<checkmark>\<close> is also a divergence trace.
More formally, a process \<open>P\<close> of the type \<open>\<Sigma> process\<close> should have the following properties:
@{cartouche [display, indent=10] \<open>([],{}) \<in> \<F> P \<and>
(\<forall> s X. (s,X) \<in> \<F> P \<longrightarrow> front_tickFree s) \<and>
(\<forall> s t . (s@t,{}) \<in> \<F> P \<longrightarrow> (s,{}) \<in> \<F> P) \<and>
(\<forall> s X Y. (s,Y) \<in> \<F> P \<and> X\<subseteq>Y \<longrightarrow> (s,X) \<in> \<F> P) \<and>
(\<forall> s X Y. (s,X) \<in> \<F> P \<and> (\<forall>c \<in> Y. ((s@[c],{}) \<notin> \<F> P)) \<longrightarrow> (s,X \<union> Y) \<in> \<F> P) \<and>
(\<forall> s X. (s@[\<checkmark>],{}) \<in> \<F> P \<longrightarrow> (s,X-{\<checkmark>}) \<in> \<F> P) \<and>
(\<forall> s t. s \<in> \<D> P \<and> tickFree s \<and> front_tickFree t \<longrightarrow> s@t \<in> \<D> P) \<and>
(\<forall> s X. s \<in> \<D> P \<longrightarrow> (s,X) \<in> \<F> P) \<and>
(\<forall> s. s@[\<checkmark>] \<in> \<D> P \<longrightarrow> s \<in> \<D> P)\<close>}
Our objective is to encapsulate this wishlist into a type constructed as a conservative
theory extension in our theory \<^holcsp>.
Third, we therefore define a pre-type for processes \<open>\<Sigma> process\<^sub>0\<close> by \<open> \<P>(\<Sigma>\<^sup>\<checkmark>\<^sup>* \<times> \<P>(\<Sigma>\<^sup>\<checkmark>)) \<times> \<P>(\<Sigma>\<^sup>\<checkmark>)\<close>.
Fourth, we turn our wishlist of "axioms" above into the definition of a predicate \<open>is_process P\<close>
of type \<open>\<Sigma> process\<^sub>0 \<Rightarrow> bool\<close> deciding if its conditions are fulfilled. Since \<open>P\<close> is a pre-process,
we replace \<open>\<F>\<close> by \<open>fst\<close> and \<open>\<D>\<close> by \<open>snd\<close> (the HOL projections into a pair).
And last but not least, fifth, we use the following type definition:
\<^item> \<^theory_text>\<open>typedef '\<alpha> process = "{P :: '\<alpha> process\<^sub>0 . is_process P}"\<close>
Isabelle requires a proof for the existence of a witness for this set,
but this can be constructed in a straightforward manner. Suitable definitions for
\<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close> lifting \<open>fst\<close> and \<open>snd\<close> on the new \<open>'\<alpha> process\<close>-type allows to derive
the above properties for any \<open>P::'\<alpha> process\<close>. \<close>
subsection*["operator"::technical, main_author="Some(@{author ''lina''})"]
\<open>\<^csp> Operators over the Process Type\<close>
text\<open> Now, the operators of \<^csp> \<open>Skip\<close>, \<open>Stop\<close>, \<open>_\<sqinter>_\<close>, \<open>_\<box>_\<close>, \<open>_\<rightarrow>_\<close>,\<open>_\<lbrakk>_\<rbrakk>_\<close> etc.
for internal choice, external choice, prefix and parallel composition, can
be defined indirectly on the process-type. For example, for the simple case of the internal choice,
we construct it such that \<open>_\<sqinter>_\<close> has type \<open>'\<alpha> process \<Rightarrow> '\<alpha> process \<Rightarrow> '\<alpha> process\<close> and
such that its projection laws satisfy the properties \<open>\<F> (P \<sqinter> Q) = \<F> P \<union> \<F> Q\<close> and
\<open>\<D> (P \<sqinter> Q) = \<D> P \<union> \<D> Q\<close> required by @{cite "roscoe:csp:1998"}.
This boils down to a proof that an equivalent definition on the pre-process type \<open>\<Sigma> process\<^sub>0\<close>
maintains \<open>is_process\<close>, \<^ie> this predicate remains invariant on the elements of the semantic domain.
For example, we define \<open>_\<sqinter>_\<close> on the pre-process type as follows:
\<^item> \<^theory_text>\<open>definition "P \<sqinter> Q \<equiv> Abs_process(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)"\<close>
where \<open>Rep_process\<close> and \<open>Abs_process\<close> are the representation and abstraction morphisms resulting
from the type definition linking the type \<open>'\<alpha> process\<close> isomorphically to the set \<open>'\<alpha> process\<^sub>0\<close>.
The projection into \<^emph>\<open>failures\<close> is defined by \<open>\<F> = fst \<circ> Rep_process\<close>, whereas the
\<^emph>\<open>divergences\<close> are defined by \<open>\<D> = snd \<circ> Rep_process\<close>. Proving the above properties for
\<open>\<F> (P \<sqinter> Q)\<close> and \<open>\<D> (P \<sqinter> Q)\<close> requires a proof that \<open>(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)\<close>
satisfies the well-formedness conditions of \<open>is_process\<close>, which is fairly simple in this case.
The definitional presentation of the \<^csp> process operators according to @{cite "roscoe:csp:1998"}
follows always this scheme. This part of the theory comprises around 2000 loc.
\<close>
subsection*["orderings"::technical, main_author="Some(@{author ''bu''})"]
\<open>Refinement Orderings\<close>
text\<open> \<^csp> is centered around the idea of process refinement; many critical properties,
even ones typically considered "liveness properties", can be expressed in terms of these orderings, and
a conversion of processes into (finite) labelled transition systems leads to effective
model-checking techniques based on graph-exploration. Essentially, a process \<open>P\<close> \<^emph>\<open>refines\<close>
another process \<open>Q\<close> if and only if it is more deterministic and more defined (has less divergences).
Consequently, each of the three semantics models (trace, failure and failure/divergence)
has its corresponding refinement orderings.\<close>
Theorem*[th1::"theorem", short_name="\<open>Refinement properties\<close>"]\<open>
In this paper, we are interested in the following refinement orderings for the
failure/divergence model.
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> Q \<equiv> \<F> P \<supseteq> \<F> Q \<and> \<D> P \<supseteq> \<D> Q\<close>
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> Q \<equiv> \<T> P \<supseteq> \<T> Q \<and> \<D> P \<supseteq> \<D> Q\<close>
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<FF> Q \<equiv> \<FF> P \<supseteq> \<FF> Q, \<FF>\<in>{\<T>,\<F>,\<D>}\<close> \<close>
text\<open> Notice that in the \<^csp> literature, only \<open>\<sqsubseteq>\<^sub>\<F>\<^sub>\<D>\<close> is well studied for the failure/divergence model.
Our formal analysis of different granularities of the refinement orderings
allows a deeper understanding of the same semantic model. For example, \<open>\<sqsubseteq>\<^sub>\<T>\<^sub>\<D>\<close> turns
out to have in some cases better monotonicity properties and therefore allows for stronger proof
principles in \<^csp>. Furthermore, the refinement ordering \<open>\<sqsubseteq>\<^sub>\<F>\<close> analyzed here
is different from the classical
failure refinement in the literature that is studied for the stable failure model
@{cite "roscoe:csp:1998"}, where failures are only defined for stable
states, from which no internal progress is possible.
\<close>
subsection*["fixpoint"::technical, main_author="Some(@{author ''lina''})"]
\<open>Process Ordering and HOLCF\<close>
text\<open> For any denotational semantics, the fixed point theory giving semantics to systems
of recursive equations is considered the keystone. Its prerequisite is a complete partial ordering
\<open>_\<sqsubseteq>_\<close>. The natural candidate \<open>_\<sqsubseteq>\<^sub>\<F>\<^sub>\<D>_\<close> is unfortunately not complete for infinite \<open>\<Sigma>\<close> for the
generalized deterministic choice, and thus for the building block of the read-operations.
Roscoe and Brooks @{cite "Roscoe1992AnAO"} finally proposed another ordering, called the
\<^emph>\<open>process ordering\<close>, and restricted the generalized deterministic choice in a particular way such
that completeness could at least be assured for read-operations. This more complex ordering
is based on the concept of \<^emph>\<open>refusals after\<close> a trace \<open>s\<close>, defined by \<open>\<R> P s \<equiv> {X | (s, X) \<in> \<F> P}\<close>.\<close>
Definition*[process_ordering, level= "Some 2", short_name="''process ordering''"]\<open>
We define \<open>P \<sqsubseteq> Q \<equiv> \<psi>\<^sub>\<D> \<and> \<psi>\<^sub>\<R> \<and> \<psi>\<^sub>\<M> \<close>, where
\<^enum> \<open>\<psi>\<^sub>\<D> = \<D> P \<supseteq> \<D> Q \<close>
\<^enum> \<open>\<psi>\<^sub>\<R> = s \<notin> \<D> P \<Rightarrow> \<R> P s = \<R> Q s\<close>
\<^enum> \<open>\<psi>\<^sub>\<M> = Mins(\<D> P) \<subseteq> \<T> Q \<close> \<close>
text\<open>The third condition \<open>\<psi>\<^sub>\<M>\<close> implies that the set of minimal divergent traces
(ones with no proper prefix that is also a divergence) in \<open>P\<close>, denoted by \<open>Mins(\<D> P)\<close>,
should be a subset of the trace set of \<open>Q\<close>.
%One may note that each element in \<open>Mins(\<D> P)\<close> do actually not contain the \<open>\<checkmark>\<close>,
%which can be deduced from the process invariants described
%in the precedent @{technical "processinv"}. This can be explained by the fact that we are not
%really concerned with what a process does after it terminates.
It is straightforward to define the least element \<open>\<bottom>\<close> in this ordering by
\<open>\<F>(\<bottom>)= {(s,X). front_tickFree s}\<close> and \<open>\<D>(\<bottom>) = {s. front_tickFree s}\<close> \<close>
text\<open>While the original work @{cite "tej.ea:corrected:1997"} was based on its own --- and different ---
fixed-point theory, we decided to base HOL-\<^csp> 2 on HOLCF (initiated by @{cite "muller.ea:holcf:1999"}
and substantially extended in @{cite "huffman.ea:axiomatic:2005"}).
HOLCF is based on parametric polymorphism with type classes. A type class is essentially a
constraint on a type variable, requiring certain syntactic and semantic
properties. For example, the type class of partial orderings, denoted by \<open>\<alpha>::po\<close>, is restricted to
all types \<open>\<alpha>\<close> possessing a relation \<open>\<le>:\<alpha>\<times>\<alpha>\<rightarrow>bool\<close> that is reflexive, anti-symmetric, and transitive.
Isabelle possesses a construct that allows one to establish that the type \<open>nat\<close> belongs to this class,
with the consequence that all lemmas derived abstractly on \<open>\<alpha>::po\<close> are in particular applicable to
\<open>nat\<close>. The type class of \<open>po\<close> can be extended to the class of complete partial ordering \<open>cpo\<close>.
A \<open>po\<close> is said to be complete if all non-empty directed sets have a least upper bound (\<open>lub\<close>).
Finally the class of \<open>pcpo\<close> (Pointed cpo) is a \<open>cpo\<close> ordering that has a least element,
denoted by \<open>\<bottom>\<close>. For \<open>pcpo\<close> orderings, the two crucial notions of continuity (\<open>cont\<close>) and the fixed-point operator
(\<open>\<mu>X. f(X)\<close>) are defined in the usual way. A function from one \<open>cpo\<close> to another one is said
to be continuous if it distributes over the \<open>lub\<close> of all directed sets (or chains).
One key result of the fixed-point theory is the proof of the fixed-point theorem:
@{cartouche [display, indent=25] \<open>cont f \<Longrightarrow> \<mu>X. f(X) = f(\<mu>X. f(X))\<close>}
For most \<^csp> operators \<open>\<otimes>\<close> we derived rules of the form:
@{cartouche [display, indent=20] \<open>cont P \<Longrightarrow> cont Q \<Longrightarrow> cont(\<lambda>x. (P x) \<otimes> (Q x))\<close>}
These rules allow us to infer automatically, for any process term, whether it is continuous.
The port of HOL-CSP 2 to HOLCF implied that the derivation of the entire set of continuity rules
had to be completely redone (3000 loc).\<close>
Theorem*[th2,short_name="\<open>Fixpoint Induction\<close>"]
\<open>HOL-CSP provides an important proof principle, the fixed-point induction:
@{cartouche [display, indent=5] \<open>cont f \<Longrightarrow> adm P \<Longrightarrow> P \<bottom> \<Longrightarrow> (\<And>X. P X \<Longrightarrow> P(f X)) \<Longrightarrow> P(\<mu>X. f X)\<close>}
\<close>
text\<open>Fixed-point induction of @{theorem th2} requires a small side-calculus for establishing the admissibility
of a predicate; basically, predicates are admissible if they are valid for any least upper bound
of a chain \<open>x\<^sub>1 \<sqsubseteq> x\<^sub>2 \<sqsubseteq> x\<^sub>3 ... \<close> provided that \<open>\<forall>i. P(x\<^sub>i)\<close>. It turns out that \<open>_\<sqsubseteq>_\<close> and \<open>_\<sqsubseteq>\<^sub>F\<^sub>D_\<close> as
well as all other refinement orderings that we introduce in this paper are admissible.
Fixed-point inductions are the main proof weapon in verifications, together with monotonicities
and the \<^csp> laws. Denotational arguments can be hidden as they are not needed in practical
verifications. \<close>
subsection*["law"::technical, main_author="Some(@{author ''lina''})"]
\<open>\<^csp> Rules: Improved Proofs and New Results\<close>
text\<open>The \<^csp> operators enjoy a number of algebraic properties: commutativity,
associativities, and idempotence in some cases. Moreover, there is a rich body of distribution
laws between these operators. Our new version HOL-CSP 2 not only shortens and restructures the
proofs of @{cite "tej.ea:corrected:1997"}; the code shrinks from 25000 loc to 8000 loc. \<close>
Theorem*[th3, short_name="\<open>Examples of Derived Rules.\<close>"]\<open>
\<^item> \<open>\<box>x\<in>A\<union>B\<rightarrow>P(x) = (\<box>x\<in>A\<rightarrow>P x) \<box> (\<box>x\<in>B\<rightarrow>P x)\<close>
\<^item> \<open>A\<union>B\<subseteq>C \<Longrightarrow> (\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> \<box>x\<in>B\<rightarrow>Q x) = \<box>x\<in>A\<inter>B\<rightarrow>(P x \<lbrakk>C\<rbrakk> Q x)\<close>
\<^item> @{cartouche [display]\<open>A\<subseteq>C \<Longrightarrow> B\<inter>C={} \<Longrightarrow>
(\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> \<box>x\<in>B\<rightarrow>Q x) = \<box>x\<in>B\<rightarrow>(\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> Q x)\<close>}
\<^item> \<open>finite A \<Longrightarrow> A\<inter>C = {} \<Longrightarrow> ((P \<lbrakk>C\<rbrakk> Q) \ A) = ((P \ A) \<lbrakk>C\<rbrakk> (Q \ A)) ...\<close>\<close>
text\<open>The continuity proof of the hiding operator is notorious. The proof is known to involve the
classical König's lemma stating that every infinite tree with finite branching has an infinite path.
We adapt this lemma to our context as follows:
@{cartouche [display, indent=5]
\<open>infinite tr \<Longrightarrow> \<forall>i. finite{t. \<exists>t'\<in>tr. t = take i t'}
\<Longrightarrow> \<exists> f. strict_mono f \<and> range f \<subseteq> {t. \<exists>t'\<in>tr. t \<le> t'}\<close>}
in order to come up with the continuity rule: \<open>finite S \<Longrightarrow> cont P \<Longrightarrow> cont(\<lambda>X. P X \ S)\<close>.
The original proof has been shortened drastically, by a factor of 10, and important intermediate steps
generalized: monotonicity, for example, could be generalized to the infinite case.
As for new laws, consider the case of \<open>(P \ A) \ B = P \ (A \<union> B)\<close> which is
stated in @{cite "Roscoe:UCS:2010"} without proof. In the new version, we managed to establish
this law, which still needs 450 lines of complex Isar code. However, it turned out that the original
claim is not fully true: it can only be established by again applying König's
lemma to build a divergent trace of \<open>P \ (A \<union> B)\<close> from a divergent trace of \<open>(P \ A) \ B\<close>,
which requires \<open>A\<close> to be finite (\<open>B\<close> can be arbitrary)
@{footnote \<open>In @{cite "Roscoe:UCS:2010"}, the authors point out that the laws involving the hiding
operator may fail when \<open>A\<close> is infinite; however, they fail to give the precise
conditions for this case.\<close>}. Again, we want to argue that the intricate number of
cases to be considered as well as their complexity makes pen and paper proofs
practically infeasible.
\<close>
section*["newResults"::technical,main_author="Some(@{author ''safouan''}::author)",
main_author="Some(@{author ''lina''}::author)", level= "Some 3"]
\<open>Theoretical Results on Refinement\<close>
text\<open>\<close>
subsection*["adm"::technical,main_author="Some(@{author ''safouan''}::author)",
main_author="Some(@{author ''lina''}::author)"]
\<open>Decomposition Rules\<close>
text\<open>
In our framework, we implemented the pcpo process refinement together with the five refinement
orderings introduced in @{technical "orderings"}. To enable fixed-point induction, we first
establish the admissibility of these refinements:
@{cartouche [display, indent=7] \<open>cont u \<Longrightarrow> mono v \<Longrightarrow> adm(\<lambda>x. u x \<sqsubseteq>\<^sub>\<FF> v x) where \<FF>\<in>{\<T>,\<F>,\<D>,\<T>\<D>,\<F>\<D>}\<close>}
Next, we analyzed the monotonicity of these refinement orderings, the results of which are then used as
decomposition rules in our framework.
Some \<^csp> operators, such as multi-prefix and non-deterministic choice, are monotonic
under all refinement orderings, while others are not.
\<^item> External choice is monotonic under all refinement orderings except \<open>\<sqsubseteq>\<^sub>\<F>\<close>; the following monotonicities were proved:
@{cartouche [display,indent=5]
\<open>P \<sqsubseteq>\<^sub>\<FF> P' \<Longrightarrow> Q \<sqsubseteq>\<^sub>\<FF> Q' \<Longrightarrow> (P \<box> Q) \<sqsubseteq>\<^sub>\<FF> (P' \<box> Q') where \<FF>\<in>{\<T>,\<D>,\<T>\<D>,\<F>\<D>}\<close>}
\<^item> The sequence operator is not monotonic under \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> or \<open>\<sqsubseteq>\<^sub>\<T>\<close>, but it is under the combined orderings:
@{cartouche [display,indent=5]
\<open>P \<sqsubseteq>\<^sub>\<FF> P' \<Longrightarrow> Q \<sqsubseteq>\<^sub>\<FF> Q' \<Longrightarrow> (P ; Q) \<sqsubseteq>\<^sub>\<FF> (P' ; Q') where \<FF>\<in>{\<T>\<D>,\<F>\<D>}\<close>}
All refinements are right-side monotonic, but \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close> are not left-side monotonic,
which can be explained by the interdependence of the failure and divergence projections
of the first component. We thus proved:
\<^item> The hiding operator is not monotonic under \<open>\<sqsubseteq>\<^sub>\<D>\<close>:
@{cartouche [display,indent=5] \<open>P \<sqsubseteq>\<^sub>\<FF> Q \<Longrightarrow> P \ A \<sqsubseteq>\<^sub>\<FF> Q \ A where \<FF>\<in>{\<T>,\<F>,\<T>\<D>,\<F>\<D>}\<close>}
Intuitively, for the divergence refinement of the hiding operator, there may be
some trace \<open>s \<in> \<T> Q\<close> with \<open>s \<notin> \<T> P\<close> that becomes divergent in \<open>Q \ A\<close> but
not in \<open>P \ A\<close>.
\<^item> Parallel composition is not monotonic under \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> or \<open>\<sqsubseteq>\<^sub>\<T>\<close>:
@{cartouche [display,indent=5] \<open>P \<sqsubseteq>\<^sub>\<FF> P' \<Longrightarrow> Q \<sqsubseteq>\<^sub>\<FF> Q' \<Longrightarrow> (P \<lbrakk>A\<rbrakk> Q) \<sqsubseteq>\<^sub>\<FF> (P' \<lbrakk>A\<rbrakk> Q') where \<FF>\<in>{\<T>\<D>,\<F>\<D>}\<close>}
The failure and divergence projections of this operator are also interdependent, similar to the
sequence operator. Hence, this operator is not monotonic under \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close>, but it is monotonic
when their combinations are considered. \<close>
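text\<open>To illustrate their use as decomposition rules: a refinement goal over a composite process
is reduced, by applying the corresponding monotonicity rule as an introduction rule, to
refinement goals over its components, which can then be attacked, \<^eg>, by fixed-point induction.
A minimal proof-script sketch (the rule name \<open>mono_det_FD\<close> is illustrative, not necessarily the
actual name in HOL-CSP 2):
@{theory_text [display,indent=5] \<open>lemma "(P \<box> Q) \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> (P' \<box> Q')"
  apply (rule mono_det_FD) \<comment> \<open>splits the goal into \<open>P \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> P'\<close> and \<open>Q \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> Q'\<close>\<close>
  ...\<close>}\<close>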
subsection*["processes"::technical,main_author="Some(@{author ''safouan''}::author)",
main_author="Some(@{author ''lina''}::author)"]
\<open>Reference Processes and their Properties\<close>
text\<open>
We now present reference processes that exhibit basic behaviors, introduced in
fundamental \<^csp> works @{cite "Roscoe:UCS:2010"}. The process \<open>RUN A\<close> always
accepts events from \<open>A\<close> offered by the environment. The process \<open>CHAOS A\<close> can always choose to
accept or reject any event of \<open>A\<close>. The process \<open>DF A\<close> is the most non-deterministic deadlock-free
process on \<open>A\<close>, \<^ie>, it can never refuse all events of \<open>A\<close>.
To handle termination better, we added two new processes \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> and \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close>.
%Note that we do not redefine \<open>RUN\<close> with \<open>SKIP\<close> because this process is supposed to never terminate,
%thus must be without it.
\<close>
(*<*) (* a test ...*)
text*[X22 ::math_content, level="Some 2" ]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
text*[X32::"definition", level="Some 2", mcc=defn]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X42, level="Some 2"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X52::"definition", level="Some 2"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
text\<open> The \<open>RUN\<close>-process defined in @{math_content X22} represents the process that accepts all
events, but never stops nor deadlocks. The \<open>CHAOS\<close>-process comes in two variants shown in
@{definition X32}, @{definition X42} and @{definition X52}: the process that non-deterministically
stops or accepts any offered event, whereas \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> can additionally terminate.\<close>
(*>*)
Definition*[X2, level="Some 2"]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
Definition*[X3, level="Some 2"]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X4, level="Some 2"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close>\<close>
Definition*[X5, level="Some 2"]\<open>\<open>DF A \<equiv> \<mu> X. (\<sqinter> x \<in> A \<rightarrow> X)\<close> \<close>
Definition*[X6, level="Some 2"]\<open>\<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. ((\<sqinter> x \<in> A \<rightarrow> X) \<sqinter> SKIP)\<close> \<close>
text\<open>In the following, we denote \<open> \<R>\<P> = {DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P, DF, RUN, CHAOS, CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P}\<close>.
All five reference processes are divergence-free, which was proven using the lemma
\<open>\<D> (\<mu> x. f x) = \<Inter>\<^sub>i\<^sub>\<in>\<^sub>\<nat> \<D> (f\<^sup>i \<bottom>)\<close>:
@{cartouche
[display,indent=8] \<open>\<D> (\<PP> UNIV) = {} where \<PP> \<in> \<R>\<P> and UNIV is the set of all events\<close>
}
Regarding the failure refinement ordering, the set of failures \<open>\<F> P\<close> for any process \<open>P\<close> is
a subset of \<open>\<F> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close>.
@{cartouche [display, indent=25] \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close>}
Furthermore, the following five relationships were demonstrated from monotonicity results and
a denotational proof.
\<close>
Corollary*[co1::"corollary", short_name="\<open>Corollaries on reference processes.\<close>",level="Some 2"]
\<open> \<^hfill> \<^br> \<^vs>\<open>-0.3cm\<close>
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> CHAOS A\<close>
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A\<close>
\<^enum> \<open>CHAOS A \<sqsubseteq>\<^sub>\<F> DF A\<close>
\<^enum> \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> DF A\<close>
\<^enum> \<open>DF A \<sqsubseteq>\<^sub>\<F> RUN A\<close> \<^vs>\<open>0.3cm\<close>
where 1 and 2 are immediate, 4 and 5 are directly obtained from our monotonicity
results, while 3 requires an argument over the denotational space.
Thanks to transitivity, we can derive further relationships, \<^eg> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> DF A\<close> from 2 and 4.\<close>
text\<open> Lastly, regarding trace refinement, for any process \<open>P\<close>,
its set of traces \<open>\<T> P\<close> is a subset of \<open>\<T> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close> and of \<open>\<T> (DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close> as well.
%As we already proved that \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> covers all failures,
%we can immediately infer that it also covers all traces.
%The \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> case requires a longer denotational proof.
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T> P\<close>
\<^enum> \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T> P\<close>
\<close>
text\<open>
Recall that a concurrent system is considered deadlocked if no component can make any
progress, caused for example by the competition for resources. In contrast to deadlock,
processes can enter infinite loops inside a sub-component without ever interacting with their
environment again ("infinite internal chatter"); this situation is called divergence or livelock.
Both properties are not just sanity conditions; in \<^csp>, they play a central role in
verification. For example, if one wants to establish that a protocol implementation \<open>IMPL\<close> satisfies
a non-deterministic specification \<open>SPEC\<close>, it suffices to ask whether \<open>IMPL || SPEC\<close> is deadlock-free.
In this setting, \<open>SPEC\<close> becomes a kind of observer that signals non-conformance of \<open>IMPL\<close> by
deadlock.
In the literature, deadlock and livelock are phenomena that are often
handled separately. One contribution of our work is to establish their precise relationship inside
the Failure/Divergence Semantics of \<^csp>.\<close>
Definition*[X10::"definition", level="Some 2"]\<open> \<open>deadlock\<^sub>-free P \<equiv> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close> \<close>
text\<open>\<^noindent> A process \<open>P\<close> is deadlock-free if and only if, after any trace \<open>s\<close> without \<open>\<checkmark>\<close>, the union of \<open>\<checkmark>\<close>
and all events of \<open>P\<close> can never be a refusal set associated with \<open>s\<close>, which means that \<open>P\<close> cannot
deadlock after any non-terminating trace.
\<close>
Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>", level="Some 2"]
\<open> \<^hfill> \<^br> \<open>deadlock_free P \<longleftrightarrow> (\<forall>s\<in>\<T> P. tickFree s \<longrightarrow> (s, {\<checkmark>}\<union>events_of P) \<notin> \<F> P)\<close> \<close>
Definition*[X11, level="Some 2"]\<open> \<open>livelock\<^sub>-free P \<equiv> \<D> P = {} \<close> \<close>
text\<open> Recall that all five reference processes are livelock-free.
We also have the following lemmas about the
livelock-freeness of processes:
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> \<PP> UNIV \<sqsubseteq>\<^sub>\<D> P where \<PP> \<in> \<R>\<P>\<close>
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P \<longleftrightarrow> CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P\<close>
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> P\<close>
\<close>
text\<open>
Finally, we proved the following theorem that confirms the relationship between the two vital
properties:
\<close>
Theorem*[T2, short_name="''DF implies LF''", level="Some 2"]
\<open> \<open>deadlock_free P \<longrightarrow> livelock_free P\<close> \<close>
text\<open>
At first glance, this is totally natural, yet surprising, since the proof of deadlock-freeness only
requires failure refinement \<open>\<sqsubseteq>\<^sub>\<F>\<close> (see @{definition \<open>X10\<close>}), where divergence traces are mixed within
the failures set. Note that the existing tools in the literature normally detect these two phenomena
separately; for instance, checking livelock-freeness in FDR is very costly.
In our framework, deadlock-freeness of a given system
implies its livelock-freeness. However, if a system is not deadlock-free,
then it may still be livelock-free. % This makes sense since livelocks are worse than deadlocks.
\<close>
section*["advanced"::technical,main_author="Some(@{author ''safouan''}::author)",level="Some 3"]
\<open>Advanced Verification Techniques\<close>
text\<open>
Based on the refinement framework discussed in @{technical "newResults"}, we will now
turn to some more advanced proof principles, tactics and verification techniques.
We will demonstrate them on two paradigmatic examples well-known in the \<^csp> literature:
The CopyBuffer and Dijkstra's Dining Philosophers. In both cases, we will exploit
the fact that HOL-CSP 2 allows for reasoning over infinite \<^csp>; in the first case,
we reason over infinite alphabets approaching an old research objective:
exploiting data-independence @{cite "Lazic1998ASS" and "AnZhangYou14"} in process
verification. In the latter case, we present an approach to a verification of a parameterized
architecture, in this case a ring-structure of arbitrary size.
\<close>
subsection*["illustration"::technical,main_author="Some(@{author ''safouan''}::author)", level="Some 3"]
\<open>The General CopyBuffer Example\<close>
text\<open>
We consider the paradigmatic copy buffer example @{cite "Hoare:1985:CSP:3921" and "Roscoe:UCS:2010"}
that is characteristic of a specification of a prototypical process and its
implementation. It is used extensively in the \<^csp> literature to illustrate the interplay
of communication, component concealment and fixed-point operators.
The process \<open>COPY\<close> is a specification of a one-place buffer that receives elements from the channel
\<open>left\<close> of arbitrary type \<open>\<alpha>\<close> and outputs them on the channel \<open>right\<close>:
@{theory_text [display,indent=5] \<open>
datatype \<alpha> events = left \<alpha> | right \<alpha> | mid \<alpha> | ack
definition COPY \<equiv> (\<mu> X. left?x \<rightarrow> (right!x \<rightarrow> X))\<close>}
\<^noindent> From our HOL-CSP 2 theory that establishes the continuity of all \<^csp> operators, we deduce that
such a fixed-point process \<open>COPY\<close> exists and follows the unrolling rule below:
@{theory_text [display,indent=5] \<open>lemma COPY = (left?x \<rightarrow> (right!x \<rightarrow> COPY))\<close>}
\<^noindent> We put \<open>SEND\<close> and \<open>REC\<close> in parallel, sharing a middle channel
\<open>mid\<close> and synchronizing on an \<open>ack\<close> event. Then, we hide all events exchanged between these two
processes and call the resulting process \<open>SYSTEM\<close>:
@{theory_text [display,indent=5] \<open>
definition SEND \<equiv> (\<mu> X. left?x \<rightarrow> (mid!x \<rightarrow> (ack \<rightarrow> X)))
definition REC \<equiv> (\<mu> X. mid?x \<rightarrow> (right!x \<rightarrow> (ack \<rightarrow> X)))
definition SYN \<equiv> (range mid) \<union> {ack}
definition "SYSTEM \<equiv> (SEND \<lbrakk>SYN\<rbrakk> REC) \\ SYN"\<close>}
\<^noindent> We want to verify that \<open>SYSTEM\<close> implements \<open>COPY\<close>. As shown below, we apply fixed-point induction
to prove that \<open>SYSTEM\<close> refines \<open>COPY\<close> using the \<open>pcpo\<close> process ordering \<open>\<sqsubseteq>\<close> that implies all other
refinement orderings. We state:
@{theory_text [display,indent=5] \<open>lemma: COPY \<sqsubseteq> SYSTEM\<close>}
and apply fixed-point induction over \<open>COPY\<close>; this leaves us with three subgoals:
\<^enum> \<open>adm (\<lambda>a. a \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN)\<close>
\<^enum> \<open>\<bottom> \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>
\<^enum> @{cartouche [display]\<open>P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN \<Longrightarrow>
left?x \<rightarrow> right!x \<rightarrow> P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>}
The first two sub-proofs are automatic simplification proofs; the third requires unfolding
\<open>SEND\<close> and \<open>REC\<close> one step and applying the algebraic laws. No denotational
semantics reasoning is necessary here; it is just an induct-simplify proof consisting
of a two-line proof script involving the derived algebraic laws of \<^csp>.
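A sketch of this proof script (the concrete simplifier setup is elided; the method names
follow the HOLCF convention):
@{theory_text [display,indent=5] \<open>lemma "COPY \<sqsubseteq> SYSTEM"
  apply (unfold COPY_def, rule fix_ind) \<comment> \<open>produces the three subgoals above\<close>
  by (simp_all add: ...) \<comment> \<open>admissibility, \<open>\<bottom>\<close>-case, and the induction step\<close>\<close>}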
After proving that \<open>SYSTEM\<close> implements \<open>COPY\<close> for arbitrary alphabets, we profit from this
first established result to check which relations \<open>SYSTEM\<close> has with respect to the reference processes of
@{technical "processes"}. Thus, we prove that \<open>COPY\<close> is deadlock-free, which implies livelock-free
(the proof is a fixed-point induction similar to \<open>lemma: COPY \<sqsubseteq> SYSTEM\<close>), from which we immediately infer
by transitivity that \<open>SYSTEM\<close> is as well. Using refinement relations, we killed four birds with one stone,
as we proved deadlock-freeness and livelock-freeness for both the \<open>COPY\<close> and \<open>SYSTEM\<close> processes.
These properties hold for arbitrary alphabets, infinite ones in particular.
@{theory_text [display, indent=5] \<open>
lemma DF UNIV \<sqsubseteq> COPY
corollary deadlock_free COPY
and livelock_free COPY
and deadlock_free SYSTEM
and livelock_free SYSTEM\<close>}
\<close>
subsection*["inductions"::technical,main_author="Some(@{author ''safouan''}::author)"]
\<open>New Fixed-Point Inductions\<close>
text\<open>
The copy buffer refinement proof \<open>DF UNIV \<sqsubseteq> COPY\<close> is a typical one-step induction proof
with two goals:
\<open>base: \<bottom> \<sqsubseteq> Q\<close> and \<open>1-ind: X \<sqsubseteq> Q \<Longrightarrow> (_ \<rightarrow> X) \<sqsubseteq> Q\<close>. Now, if unfolding the fixed-point process \<open>Q\<close>
reveals two steps, the second goal becomes
\<open>X \<sqsubseteq> Q \<Longrightarrow> _ \<rightarrow> X \<sqsubseteq> _ \<rightarrow> _ \<rightarrow> Q\<close>. Unfortunately, this goal is unprovable
using the monotonicity rules alone.
We need here a two-step induction of the form \<open>base0: \<bottom> \<sqsubseteq> Q\<close>, \<open>base1: _ \<rightarrow> \<bottom> \<sqsubseteq> Q\<close> and
\<open>2-ind: X \<sqsubseteq> Q \<Longrightarrow> _ \<rightarrow> _ \<rightarrow> X \<sqsubseteq> _ \<rightarrow> _ \<rightarrow> Q\<close> to obtain a sufficiently powerful induction scheme.
For this reason, we derived a number of alternative induction schemes (which are not available
in the HOLCF library), which are also relevant for our final Dining Philosophers example.
These are essentially adaptations of k-induction schemes to the domain-theoretic
setting (thus requiring \<open>f\<close> continuous and \<open>P\<close> admissible; these preconditions are
omitted here):\<^vs>\<open>0.2cm\<close>
\<^item> \<open>... \<Longrightarrow> \<forall>i<k. P (f\<^sup>i \<bottom>) \<Longrightarrow> (\<forall>X. (\<forall>i<k. P (f\<^sup>i X)) \<longrightarrow> P (f\<^sup>k X)) \<Longrightarrow> P (\<mu>X. f X)\<close>
\<^item> \<open>... \<Longrightarrow> \<forall>i<k. P (f\<^sup>i \<bottom>) \<Longrightarrow> (\<forall>X. P X \<longrightarrow> P (f\<^sup>k X)) \<Longrightarrow> P (\<mu>X. f X)\<close>
\<^noindent> In the latter variant, the induction hypothesis is weakened to skip \<open>k\<close> steps. When applicable,
this reduces the goal size.
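\<^noindent> For instance, for \<open>k = 2\<close>, the first scheme instantiates to exactly the two-step
induction required above:
@{cartouche [display,indent=5]
\<open>... \<Longrightarrow> P \<bottom> \<Longrightarrow> P (f \<bottom>) \<Longrightarrow> (\<forall>X. P X \<and> P (f X) \<longrightarrow> P (f (f X))) \<Longrightarrow> P (\<mu>X. f X)\<close>}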
Another problem that occasionally occurs in refinement proofs arises when the right-hand side
involves more than one fixed-point process (\<^eg> \<open>P \<lbrakk>A\<rbrakk> Q \<sqsubseteq> S\<close>). In this situation,
we need parallel fixed-point inductions. The HOLCF library offers only a basic one:
\<^item> \<open>... \<Longrightarrow> P \<bottom> \<bottom> \<Longrightarrow> (\<forall>X Y. P X Y \<Longrightarrow> P (f X) (g Y)) \<Longrightarrow> P (\<mu>X. f X) (\<mu>X. g X)\<close>
\<^noindent> This form does not help in cases like \<open>P \<lbrakk>\<emptyset>\<rbrakk> Q \<sqsubseteq> S\<close> with the interleaving operator on the
right-hand side. The relevant simplification law is:
@{cartouche [display, indent=3]\<open>
(\<box>x\<in>A\<rightarrow>P x \<lbrakk>\<emptyset>\<rbrakk> \<box>x\<in>B\<rightarrow>Q x) = (\<box>x\<in>A \<rightarrow> ( P x \<lbrakk>\<emptyset>\<rbrakk> \<box>x\<in>B \<rightarrow> Q x)
\<box> (\<box>x\<in>B \<rightarrow> (\<box>x\<in>A \<rightarrow> P x \<lbrakk>\<emptyset>\<rbrakk> Q x))\<close>}
Here, \<open>(f X \<lbrakk>\<emptyset>\<rbrakk> g Y)\<close> does not reduce to the \<open>(X \<lbrakk>\<emptyset>\<rbrakk> Y)\<close> term but to two terms \<open>(f X \<lbrakk>\<emptyset>\<rbrakk> Y)\<close> and
\<open>(X \<lbrakk>\<emptyset>\<rbrakk> g Y)\<close>.
To handle these cases, we developed an advanced parallel induction scheme and we proved its
correctness:
\<^item> @{cartouche [display] \<open>... \<Longrightarrow> (\<forall>Y. P \<bottom> Y) \<Longrightarrow> (\<forall>X. P X \<bottom>)
\<Longrightarrow> \<forall>X Y. (P X Y \<and> P (f X) Y \<and> P X (g Y)) \<longrightarrow> P (f X) (g Y)
\<Longrightarrow> P (\<mu>X. f X) (\<mu>X. g X)\<close>}
\<^noindent> which allows for an "independent unrolling" of the fixed-points in these proofs.
The astute reader may notice here that when the induction step is weakened (having more hypotheses),
the base cases must be strengthened accordingly.
\<close>
subsection*["norm"::technical,main_author="Some(@{author ''safouan''}::author)"]
\<open>Normalization\<close>
text\<open>
Our framework can reason not only over infinite alphabets, but also over processes parameterized
over states with an arbitrarily rich structure. This paves the way for the following technique,
which trades potentially complex process structure against equivalent simple processes with
potentially rich state.
Roughly similar to labelled transition systems, we provide for deterministic \<^csp> processes a normal
form that is based on an explicit state. The general schema of normalized processes is defined as
follows:
@{cartouche [display,indent=20] \<open>P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<tau>\<^sub>,\<upsilon>\<rbrakk> \<equiv> \<mu> X. (\<lambda>\<sigma>. \<box>e\<in>(\<tau> \<sigma>) \<rightarrow> X(\<upsilon> \<sigma> e))\<close>}
where \<open>\<tau>\<close> is a transition function which returns the set of events that can be triggered from
the current state \<open>\<sigma>\<close> given as parameter.
The update function \<open>\<upsilon>\<close> takes two parameters \<open>\<sigma>\<close> and an event \<open>e\<close> and returns the new state.
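In Isabelle, this schema can be rendered as a definition parameterized by \<open>\<tau>\<close> and \<open>\<upsilon>\<close>;
the following is a sketch (the type annotation is an illustrative assumption about the
actual formalization):
@{theory_text [display,indent=5] \<open>definition P\<^sub>n\<^sub>o\<^sub>r\<^sub>m :: "('\<sigma> \<Rightarrow> 'e set) \<Rightarrow> ('\<sigma> \<Rightarrow> 'e \<Rightarrow> '\<sigma>) \<Rightarrow> '\<sigma> \<Rightarrow> 'e process"
  where "P\<^sub>n\<^sub>o\<^sub>r\<^sub>m \<tau> \<upsilon> \<equiv> \<mu> X. (\<lambda>\<sigma>. \<box> e \<in> (\<tau> \<sigma>) \<rightarrow> X (\<upsilon> \<sigma> e))"\<close>}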
This normal form is closed under deterministic and communication operators.
The advantage of this format is that we can mimic the well-known product automata construction
for an arbitrary number of synchronized processes under normal form.
We only show the case of the synchronous product of two processes: \<close>
Theorem*[T3, short_name="\<open>Product Construction\<close>", level="Some 2"]\<open>
Parallel composition translates to normal form:
@{cartouche [display,indent=5]\<open>(P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<tau>\<^sub>1,\<upsilon>\<^sub>1\<rbrakk> \<sigma>\<^sub>1) || (P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<tau>\<^sub>2,\<upsilon>\<^sub>2\<rbrakk> \<sigma>\<^sub>2) =
P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<lambda>(\<sigma>\<^sub>1,\<sigma>\<^sub>2). \<tau>\<^sub>1 \<sigma>\<^sub>1 \<inter> \<tau>\<^sub>2 \<sigma>\<^sub>2 , \<lambda>(\<sigma>\<^sub>1,\<sigma>\<^sub>2).\<lambda>e.(\<upsilon>\<^sub>1 \<sigma>\<^sub>1 e, \<upsilon>\<^sub>2 \<sigma>\<^sub>2 e)\<rbrakk> (\<sigma>\<^sub>1,\<sigma>\<^sub>2)\<close>}
\<close>
text\<open> The generalization of this rule to a list of \<open>(\<tau>,\<upsilon>)\<close>-pairs is straightforward,
albeit the formal proof is not. The application of the generalized form is a cornerstone of the
proof of the general dining philosophers problem illustrated in the subsequent section.
Another advantage of normalized processes is the possibility to argue over the reachability of
states via the closure \<open>\<RR>\<close>, which is defined inductively by:
\<^item> \<open>\<sigma> \<in> \<RR> \<tau> \<upsilon> \<sigma>\<close>
\<^item> \<open>\<sigma> \<in> \<RR> \<tau> \<upsilon> \<sigma>\<^sub>0 \<Longrightarrow> e \<in> \<tau> \<sigma> \<Longrightarrow> \<upsilon> \<sigma> e \<in> \<RR> \<tau> \<upsilon> \<sigma>\<^sub>0\<close>
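\<^noindent> In Isabelle, \<open>\<RR>\<close> can be sketched as an inductive set definition (the actual
formalization may differ in details):
@{theory_text [display,indent=5] \<open>inductive_set \<RR> :: "('\<sigma> \<Rightarrow> 'e set) \<Rightarrow> ('\<sigma> \<Rightarrow> 'e \<Rightarrow> '\<sigma>) \<Rightarrow> '\<sigma> \<Rightarrow> '\<sigma> set"
  for \<tau> \<upsilon> \<sigma>\<^sub>0 where
    "\<sigma>\<^sub>0 \<in> \<RR> \<tau> \<upsilon> \<sigma>\<^sub>0"
  | "\<sigma> \<in> \<RR> \<tau> \<upsilon> \<sigma>\<^sub>0 \<Longrightarrow> e \<in> \<tau> \<sigma> \<Longrightarrow> \<upsilon> \<sigma> e \<in> \<RR> \<tau> \<upsilon> \<sigma>\<^sub>0"\<close>}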
Thus, normalization leads to a new characterization of deadlock-freeness, inspired
by automata theory. We formally proved the following theorem:\<close>
text*[T4::"theorem", short_name="\<open>DF vs. Reachability\<close>", level="Some 2"]
\<open> If each reachable state \<open>\<sigma> \<in> (\<RR> \<tau> \<upsilon> \<sigma>\<^sub>0)\<close> has outgoing transitions,
the \<^csp> process is deadlock-free:
@{cartouche [display,indent=10] \<open>\<forall>\<sigma> \<in> (\<RR> \<tau> \<upsilon> \<sigma>\<^sub>0). \<tau> \<sigma> \<noteq> {} \<Longrightarrow> deadlock_free (P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<tau>,\<upsilon>\<rbrakk> \<sigma>\<^sub>0)\<close>}
\<close>
text\<open> This theorem allows for establishing properties such as deadlock-freeness by
completely abstracting from \<^csp> theory; these are arguments that only involve inductive reasoning
over the transition function.
Summing up, our method consists of four stages:
\<^enum> we construct normalized versions of component processes and prove them
equivalent to their counterparts,
\<^enum> we state an invariant over the states/variables,
\<^enum> we prove by induction over \<open>\<RR>\<close> that it holds on all reachable states, and finally
\<^enum> we prove that this invariant guarantees the existence of outgoing transitions.
\<close>
subsection*["dining_philosophers"::technical,main_author="Some(@{author ''safouan''}::author)",level="Some 3"]
\<open>Generalized Dining Philosophers\<close>
text\<open> The dining philosophers problem is another paradigmatic example in the \<^csp> literature
often used to illustrate synchronization problems between an arbitrary number of concurrent systems.
It is an example of a process scheme for which general properties are desirable in order
to inherit them for specific instances.
The general dining philosopher problem for an arbitrary \<open>N\<close> is presented in HOL-CSP 2 as follows
%@{footnote \<open>The dining philosopher problem is also distributed with FDR4, where \<open>N = 6\<close>.\<close>}:
@{theory_text [display,indent=5]
\<open>datatype dining_event = picks (phil::nat) (fork::nat)
| putsdown (phil::nat) (fork::nat)
| eat (phil::nat)
definition LPHIL0 \<equiv> (\<mu> X. (picks 0 (N-1) \<rightarrow> (picks 0 0 \<rightarrow> eat 0 \<rightarrow>
(putsdown 0 0 \<rightarrow> (putsdown 0 (N-1) \<rightarrow> X)))))
definition RPHIL i \<equiv> (\<mu> X. (picks i i \<rightarrow> (picks i (i-1) \<rightarrow> eat i \<rightarrow>
(putsdown i (i-1) \<rightarrow> (putsdown i i \<rightarrow> X)))))
definition FORK i \<equiv> (\<mu> X. (picks i i \<rightarrow> (putsdown i i \<rightarrow> X))
\<box>(picks (i+1)%N i \<rightarrow>(putsdown (i+1)%N i \<rightarrow> X)))
definition "PHILs \<equiv> LPHIL0 ||| (|||\<^sub>i\<^sub>\<in>\<^sub>1\<^sub>.\<^sub>.\<^sub>N RPHIL i)"
definition "FORKs \<equiv> |||\<^sub>i\<^sub>\<in>\<^sub>0\<^sub>.\<^sub>.\<^sub>N FORK i"
definition DINING \<equiv> FORKs \<lbrakk>picks, putsdown\<rbrakk> PHILs\<close>}
% this should be theory_text, but is rejected for lexical reasons
Note that the philosophers and the forks are each pairwise independent,
but they synchronize with each other on \<open>picks\<close> and \<open>putsdown\<close> events. The philosopher of index 0 is left-handed
whereas the other \<open>N-1\<close> philosophers are right-handed. We want to prove that any configuration
is deadlock-free for an arbitrary number N.
First, we put the fork process into normal form. It has three states:
(1) on the table, (2) picked up by the right philosopher, or (3) picked up by the left one:
@{theory_text [display,indent=5]
\<open>definition trans\<^sub>f i \<sigma> \<equiv> if \<sigma> = 0 then {picks i i, picks (i+1)%N i}
else if \<sigma> = 1 then {putsdown i i}
else if \<sigma> = 2 then {putsdown (i+1)%N i}
else {}
definition upd\<^sub>f i \<sigma> e \<equiv> if e = (picks i i) then 1
else if e = (picks (i+1)%N i) then 2
else 0
definition FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i \<equiv> P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>trans\<^sub>f i, upd\<^sub>f i\<rbrakk> \<close>}
To validate our choice for the states, transition function \<open>trans\<^sub>f\<close> and update function \<open>upd\<^sub>f\<close>,
we prove that they are equivalent to the original process components: \<open>FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i = FORK i\<close>.
The anti-symmetry of refinement breaks this down to the two refinement proofs \<open>FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i \<sqsubseteq> FORK i\<close>
and \<open>FORK i \<sqsubseteq> FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i\<close>, which are similar to the CopyBuffer example shown in
@{technical "illustration"}. Note, again, that this fairly automatic induct-simplify proof just
involves reasoning over the derived algebraic rules, not any reasoning at the level of the
denotational semantics.
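A sketch of the first direction, which needs a two-step induction since unfolding the
original fixed-point process reveals two steps \<open>FORK i = picks \<rightarrow> putsdown \<rightarrow> FORK i\<close>
(the method name \<open>fix_ind2\<close> is illustrative; the simplifier setup is elided):
@{theory_text [display,indent=5] \<open>lemma "FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i \<sqsubseteq> FORK i"
  apply (unfold FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m_def, rule fix_ind2) \<comment> \<open>two base cases, one two-step induction step\<close>
  by (simp_all add: ...)\<close>}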
%Second we prove that the normal form process is equivalent to the original fork process
%by proving refinements in both directions. We note here that the first refinement \<open>FORK\<^sub>n\<^sub>o\<^sub>r\<^sub>m i \<sqsubseteq> FORK i\<close>
%requires a two steps induction as unfolding the original fixed-point process brings two steps
%\<open>FORK i = picks \<rightarrow> putsdown \<rightarrow> FORK i\<close>. After that we apply the same method
%to get the philosopher process under a normal form.
Thanks to @{theorem \<open>T3\<close>}, we obtain normalized processes
for \<open>FORKs\<close>, \<open>PHILs\<close> and \<open>DINING\<close>:
@{theory_text [display,indent=5]
\<open>definition "trans\<^sub>F \<equiv> \<lambda>fs. (\<Inter>\<^sub>i\<^sub><\<^sub>N. trans\<^sub>f i (fs!i))"
definition upd\<^sub>F \<equiv> \<lambda>fs e. let i=(fork e) in fs[i:=(upd\<^sub>f i (fs!i) e)]
lemma FORKs = P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>trans\<^sub>F, upd\<^sub>F\<rbrakk> ...
lemma PHILs = P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>trans\<^sub>P, upd\<^sub>P\<rbrakk> ...
definition trans\<^sub>D \<equiv> \<lambda>(ps,fs). (trans\<^sub>P ps) \<inter> (trans\<^sub>F fs)
definition upd\<^sub>D \<equiv> \<lambda>(ps,fs) e. (upd\<^sub>P ps e, upd\<^sub>F fs e)
lemma DINING = P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>trans\<^sub>D, upd\<^sub>D\<rbrakk> \<close>}
The variable \<open>ps\<close> stands for the list of philosopher states and \<open>fs\<close>
for the list of fork states; both are of size \<open>N\<close>. The pair \<open>(ps, fs)\<close>
encodes the whole dining-table state, over which we need to define an invariant to ensure
that no blocking state is reachable and thus that the dining philosophers problem is deadlock-free.
As explained before, the proof is based on abstract reasoning over relations,
independent of the \<^csp> context.
The last steps towards our goal are the following definitions and lemmas:
@{theory_text [display,indent=5]
\<open>definition INV\<^sub>D\<^sub>I\<^sub>N\<^sub>I\<^sub>N\<^sub>G ps fs \<equiv> (\<forall>i. ((fs!i=1) \<leftrightarrow> ps!i \<noteq> 0) \<and> ... )
lemma (ps,fs) \<in> \<RR> trans\<^sub>D upd\<^sub>D \<Longrightarrow> INV\<^sub>D\<^sub>I\<^sub>N\<^sub>I\<^sub>N\<^sub>G ps fs ...
lemma INV\<^sub>D\<^sub>I\<^sub>N\<^sub>I\<^sub>N\<^sub>G ps fs \<Longrightarrow> trans\<^sub>D (ps, fs) \<noteq> {} ...
corollary deadlock_free DINING \<close>}
To sum up, we proved once and for all that the dining philosophers problem is deadlock-free
for an arbitrary number \<open>N \<ge> 2\<close>. Common model checkers like PAT and FDR fail to answer
for a dozen philosophers (on a typical machine) due to the exponential combinatorial explosion.
Furthermore, in contrast to model-checking techniques, our proof is fairly stable under
modifications such as adding non-synchronized events like thinking or sitting down. \<close>
section*["relatedwork"::technical,main_author="Some(@{author ''lina''}::author)",level="Some 3"]
\<open>Related work\<close>
text\<open>
The theory of \<^csp> has attracted a lot of interest since the eighties and is still
a fairly active research area, both
as a theoretical device as well as a modelling language for the analysis of complex concurrent systems.
It is therefore not surprising that attempts at its formalisation were undertaken early,
with the advent of interactive theorem proving systems supporting higher-order logic
@{cite "Camilleri91" and "tej.ea:corrected:1997" and "10.1007/978-3-642-16690-7_9"
and "10.1007/978-3-642-27705-4_20" and "DBLP:conf/concur/IsobeR06" }, where
especially the latter allows for some automated support for refinement proofs
based on induction. However, HOL-CSP 2 is based on the failure/divergence model, while
@{cite "DBLP:conf/concur/IsobeR06"} is based on stable failures, which can infer
deadlock-freeness only under the assumption that no livelock occurs; in our view,
this is too strong an assumption for both the theory and the tool.
In the 1990s, research focused on automated verification tools for \<^csp>, most notably on
FDR~@{cite "fdr4"}. It relies on an operational \<^csp> semantics, allowing for a conversion of processes
into labelled transition systems, where the states are normalized by the "laws" derived from the
denotational semantics.
For finite event sets, refinement proofs can be reduced to graph inclusion problems. With
efficient compression techniques, such as bisimulation, elimination, and factorization by
semantic equivalence @{cite "Roscoe95"}, FDR was used to analyze some industrial applications.
However, such a model checker cannot handle infinite cases and does not scale to large systems.
%%Another similar model checking tool @{cite "SunLDP09"} implemented some more optimization techniques,
%%such as partial order reduction, symmetric reduction, and parallel model checking, but is also
%%restricted to the finite case.
The fundamental limits of automated decision procedures for data and processes have been known
from early on: the undecidability of parameterized model checking was proven by reduction to
the non-halting of Turing machines @{cite "Suzuki88"}. However, some forms of
well-structured transition systems could be demonstrated to be decidable
@{cite "FinkelS01" and "BloemJKKRVW16"}.
HOL-CSP 2 is fully abstract with respect to the failure/divergence model; as a HOL theory, it therefore
provides a "relatively complete proof theory", both for infinite data and for an arbitrary number
of components (see @{cite "andrews2002introduction"} for relative completeness).
Encouraged by the progress of SMT solvers, which support some infinite types,
notably (fixed arrays of) integers or reals, and limited forms of formulas over these types,
SMT-based model checkers represent the current mainstream in parametric model checking.
This extends both to LTL-style model checkers for Promela-like languages
@{cite "Cubicle" and "ByMC"} as well as to process-algebra-like approaches
@{cite "AntoninoGR19" and "AntoninoGR16" and "BensalemGLNSY11"}.
However, the usual limitations persist: the translation to SMT is hardly certifiable, and
the solvers are still not able to handle non-linear computations; moreover, they fail
to elaborate inductive proofs on data where these are necessary in refinement proofs.
Some systems involve approximation techniques in order to make the formal verification of
concurrent systems scalable; results are therefore inherently imprecise and require
meta-level arguments assuring their truth in a specific application context.
For example, in @{cite "AntoninoGR19"}, the synchronization analysis techniques try to
prove the unreachability of a system state by showing that components cannot agree
on the order or on the number of times they participate in system rules.
Even with such over-approximation, the finiteness restriction on the number of components
persists.
Last but not least, SMT-based tools focusing only on bounded model checking, like
@{cite "Kind2" and "JKind"}, use k-induction and quite powerful invariant generation
techniques, but are still far from scalable. While it is difficult to make
any precise argument on the scalability of HOL-CSP 2, we argue that we have neither data-type
restrictions (events may have real-vector, function, or even process type) nor
restrictions on the structure of components. None of our paradigmatic examples can
be automatically proven with any of the discussed SMT techniques without restrictions.
\<close>
section*["conclusion"::conclusion,main_author="Some(@{author ''bu''}::author)"]\<open>Conclusion\<close>
text\<open>We presented a formalisation of the most comprehensive semantic model for \<^csp>, a 'classical'
language for the specification and analysis of concurrent systems studied in a rich body of
literature. For this purpose, we ported @{cite "tej.ea:corrected:1997"} to a modern version
of Isabelle, restructured the proofs, and extended the resulting theory of the language
substantially. The result, HOL-CSP 2, has been submitted to the Isabelle AFP @{cite "HOL-CSP-AFP"},
thus a fairly sustainable format that is accessible to other researchers and tools.
We developed a novel set of deadlock- and livelock-inference proof principles based on
classical and denotational characterizations. In particular, we formally investigated the relations
between different refinement notions in the presence of deadlock and livelock; an area where the
traditional \<^csp> literature skates over the nitty-gritty details. Finally, we demonstrated how to
exploit these results for the deadlock/livelock analysis of protocols.
We put a large body of abstract \<^csp> laws and induction principles together to form
concrete verification technologies for generalized classical problems, which have so far been
considered from the perspective of data-independence or structural parametricity. The underlying novel
principle of "trading rich structure against rich state" allows converting processes
into classical transition systems, for which established invariant techniques become applicable.
Future applications of HOL-CSP 2 could comprise a combination with model checkers, where our theory
with its derived rules is used to certify the output of a model checker over \<^csp>. In our experience,
generated labelled transition systems may be used to steer inductions or to construct
the normalized processes \<open>P\<^sub>n\<^sub>o\<^sub>r\<^sub>m\<lbrakk>\<tau>\<^sub>,\<upsilon>\<rbrakk>\<close> automatically, thus combining efficient finite reasoning
over finite sub-systems with globally infinite systems in a logically safe way.
\<close>
(*<*)
subsection*[bib::bibliography]\<open>References\<close>
close_monitor*[this]
end
(*>*)


@ -1,91 +0,0 @@
theory PikeOS_ST (*Security Target *)
imports "Isabelle_DOF-Ontologies.CC_v3_1_R5"
begin
section \<open>ST PikeOS\<close>
open_monitor*[stpkos::ST_MNT]
section*[pkosstintrosec::st_ref_cls]\<open> ST Introduction \<close>
open_monitor*[PikosIntro::ST_INTRO_MNT]
subsection*[pkosstrefsubsec::st_ref_cls]\<open> ST Reference \<close>
text*[pkosstref::st_ref_cls, title="''PikeOS Security Target''", st_version ="(0,4,5)",
authors= "[]", st_date= "''29072020''"]
\<open>This document is the @{docitem st} for the Common Criteria evaluation of PikeOS.
It complies with the Common Criteria for Information Technology Security Evaluation
Version 3.1 Revision 4.\<close>
subsection*[pkossttoerefsubsec::st_ref_cls]\<open>TOE Reference\<close>
text*[pkostoeref::toe_ref_cls, dev_name="''''", toe_name="''PikeOS''",
toe_version= "(0,3,4)", prod_name="Some ''S3725''"]
\<open>The @{docitem (unchecked) toeDef} is the operating system PikeOS version 3.4
running on the microprocessor family x86 hosting different applications.
The @{docitem (unchecked) toeDef} is referenced as PikeOS 3.4 base
product build S3725 for Linux and Windows development host with PikeOS 3.4
Certification Kit build S4250 and PikeOS 3.4 Common Criteria Kit build S4388.\<close>
subsection*[pkossttoeovrvwsubsec::st_ref_cls]\<open> TOE Overview \<close>
text*[pkosovrw1::toe_ovrw_cls]\<open>The @{docitem (unchecked) \<open>toeDef\<close> } is a special kind of operating
system that allows effectively separating
different applications running on the same platform from each other. The TOE can host
user applications that can themselves be operating systems. User applications can also be
malicious, and even in that case the TOE ensures that malicious user applications
harm neither the TOE nor applications in other partitions. The TOE will be
installed and run on a hardware platform (e.g. embedded systems).
The TOE is intended to be used as a component (the separation kernel) in MILS systems.
MILS (Multiple Independent Levels of Security) systems are explained in .
The TOE controls usage of memory, devices, processors, and communication channels
to ensure complete separation of user applications and to prevent unexpected
interference between user applications. The TOE enforces restrictions on the
communication between the separated user applications as specified by the configuration
data.
The major security services provided by the TOE are:
\<^item> Separation in space of applications hosted in different partitions from each other
  and from the PikeOS operating system, according to the configuration data,
  by using the underlying hardware,
\<^item> Separation in time of applications hosted in different partitions from each other
  and from the PikeOS operating system according to the configuration data,
\<^item> Provision and management of communication objects,
\<^item> Management of and access to the TOE and TOE data,
\<^item> PikeOS operating system self-protection and accuracy of security functionality,
\<^item> Generation and treatment of audit data according to the configuration data.\<close>
text*[pkosovrw2::toe_ovrw_cls, toe_type="''OS separation kernel''"]
\<open>The TOE is a special kind of operating system providing a separation kernel with
real-time support.
The typical life cycle phases for this TOE type are development (source code
development), manufacturing (compilation to binary), system integration (by the system
integrator), installation (by the system operator), and finally, operational use (by the
system operator). Operational use of the TOE is explicitly in the focus of this ST. A
security evaluation/certification according to the assurance package chosen in this ST
(see Section 2.3 “Package Claim” below) involves all these life cycle phases.\<close>
text*[pkosdesc::toe_desc_cls]\<open>\<close>
close_monitor*[PikosIntro]
open_monitor*[PikosCCLM::CONF_CLAIMS_MNT]
close_monitor*[PikosCCLM]
open_monitor*[PikosSPD::SEC_PROB_DEF_MNT]
close_monitor*[PikosSPD]
open_monitor*[PikosSO::SEC_OBJ_MNT]
close_monitor*[PikosSO]
open_monitor*[PikosSR::SEC_REQ_MNT]
close_monitor*[PikosSR]
close_monitor*[stpkos]
end


@ -1,4 +0,0 @@
session "PikeOS_study" = "Isabelle_DOF-Ontologies" +
options [document = false]
theories
"PikeOS_ST"


@ -1 +0,0 @@
PikeOS_study


@ -1 +0,0 @@
mini_odo


@ -1,18 +0,0 @@
session "mini_odo" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
sessions
"Physical_Quantities"
theories
"mini_odo"
document_theories
"Isabelle_DOF-Ontologies.CENELEC_50128"
document_files
"dof_session.tex"
"preamble.tex"
"root.bib"
"root.mst"
"figures/df-numerics-encshaft.png"
"figures/odometer.jpeg"
"figures/three-phase-odo.pdf"
"figures/wheel-df.png"


@ -1,3 +0,0 @@
\input{mini_odo}
\input{CENELEC_50128}

Binary files not shown (three deleted images: 27 KiB, 407 KiB, 23 KiB).


@ -1,22 +0,0 @@
%% Copyright (C) 2018 The University of Sheffield
%% 2018 The University of Paris-Saclay
%% 2019 The University of Exeter
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1.3c of the License, or (at your option) any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
%% This is a placeholder for user-specific configuration and packages.
\usepackage{wrapfig}
\usepackage{paralist}
\usepackage{numprint}
\newcommand{\fixIsarList}{\vspace{-\topsep}\vspace{-\baselineskip}\mbox{}\\[0pt]\noindent}
\author{}
\title{}


@ -1,884 +0,0 @@
@STRING{pub-springer={Springer} }
@STRING{pub-springer:adr="" }
@STRING{s-lncs = "LNCS" }
@Manual{ wenzel:isabelle-isar:2017,
title = {The Isabelle/Isar Reference Manual},
author = {Makarius Wenzel},
year = 2017,
note = {Part of the Isabelle distribution.}
}
@Book{ adler:r:2010,
abstract = {Presents a guide to the R computer language, covering such
topics as the user interface, packages, syntax, objects,
functions, object-oriented programming, data sets, lattice
graphics, regression models, and bioconductor.},
added-at = {2013-01-10T22:39:38.000+0100},
address = {Sebastopol, CA},
author = {Adler, Joseph},
isbn = {9780596801700 059680170X},
keywords = {R},
publisher = {O'Reilly},
refid = 432987461,
title = {R in a nutshell},
year = 2010
}
@InCollection{ wenzel.ea:building:2007,
abstract = {We present the generic system framework of
Isabelle/Isarunderlying recent versions of Isabelle. Among
other things, Isar provides an infrastructure for Isabelle
plug-ins, comprising extensible state components and
extensible syntax that can be bound to tactical ML
programs. Thus the Isabelle/Isar architecture may be
understood as an extension and refinement of the
traditional LCF approach, with explicit infrastructure for
building derivative systems. To demonstrate the technical
potential of the framework, we apply it to a concrete
formalmethods tool: the HOL-Z 3.0 environment, which is
geared towards the analysis of Z specifications and formal
proof of forward-refinements.},
author = {Makarius Wenzel and Burkhart Wolff},
booktitle = {TPHOLs 2007},
editor = {Klaus Schneider and Jens Brandt},
language = {USenglish},
acknowledgement={none},
pages = {352--367},
publisher = pub-springer,
address = pub-springer:adr,
number = 4732,
series = s-lncs,
title = {Building Formal Method Tools in the {Isabelle}/{Isar}
Framework},
doi = {10.1007/978-3-540-74591-4_26},
year = 2007
}
@Misc{ w3c:ontologies:2015,
title = {Ontologies},
organisation = {W3c},
url = {https://www.w3.org/standards/semanticweb/ontology},
year = 2018
}
@Book{ boulanger:cenelec-50128:2015,
author = {Boulanger, Jean-Louis},
title = {{CENELEC} 50128 and {IEC} 62279 Standards},
publisher = {Wiley-ISTE},
year = 2015,
address = {Boston},
note = {The reference on the standard.}
}
@Booklet{ cc:cc-part3:2006,
bibkey = {cc:cc-part3:2006},
key = {Common Criteria},
institution = {Common Criteria},
language = {USenglish},
month = sep,
year = 2006,
public = {yes},
title = {Common Criteria for Information Technology Security
Evaluation (Version 3.1), {Part} 3: Security assurance
components},
note = {Available as document
\href{http://www.commoncriteriaportal.org/public/files/CCPART3V3.1R1.pdf}
{CCMB-2006-09-003}},
number = {CCMB-2006-09-003},
acknowledgement={brucker, 2007-04-24}
}
@Book{ nipkow.ea:isabelle:2002,
author = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
title = {Isabelle/HOL---A Proof Assistant for Higher-Order Logic},
publisher = pub-springer,
address = pub-springer:adr,
series = s-lncs,
volume = 2283,
doi = {10.1007/3-540-45949-9},
abstract = {This book is a self-contained introduction to interactive
proof in higher-order logic (\acs{hol}), using the proof
assistant Isabelle2002. It is a tutorial for potential
users rather than a monograph for researchers. The book has
three parts.
1. Elementary Techniques shows how to model functional
programs in higher-order logic. Early examples involve
lists and the natural numbers. Most proofs are two steps
long, consisting of induction on a chosen variable followed
by the auto tactic. But even this elementary part covers
such advanced topics as nested and mutual recursion. 2.
Logic and Sets presents a collection of lower-level tactics
that you can use to apply rules selectively. It also
describes Isabelle/\acs{hol}'s treatment of sets, functions
and relations and explains how to define sets inductively.
One of the examples concerns the theory of model checking,
and another is drawn from a classic textbook on formal
languages. 3. Advanced Material describes a variety of
other topics. Among these are the real numbers, records and
overloading. Advanced techniques are described involving
induction and recursion. A whole chapter is devoted to an
extended example: the verification of a security protocol. },
year = 2002,
acknowledgement={brucker, 2007-02-19},
bibkey = {nipkow.ea:isabelle:2002},
tags = {noTAG},
clearance = {unclassified},
timestap = {2008-05-26}
}
@InProceedings{ wenzel:asynchronous:2014,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in
Isabelle/{PIDE}},
booktitle = {Interactive Theorem Proving (ITP)},
pages = {515--530},
year = 2014,
crossref = {klein.ea:interactive:2014},
doi = {10.1007/978-3-319-08970-6_33},
timestamp = {Sun, 21 May 2017 00:18:59 +0200},
abstract = { Historically, the LCF tradition of interactive theorem
proving was tied to the read-eval-print loop, with
sequential and synchronous evaluation of prover commands
given on the command-line. This user-interface technology
was adequate when R. Milner introduced his LCF proof
assistant in the 1970-ies, but it severely limits the
potential of current multicore hardware and advanced IDE
front-ends.
Isabelle/PIDE breaks this loop and retrofits the
read-eval-print phases into an asynchronous model of
document-oriented proof processing. Instead of feeding a
sequence of individual commands into the prover process,
the primary interface works via edits over a family of
document versions. Execution is implicit and managed by the
prover on its own account in a timeless and stateless
manner. Various aspects of interactive proof checking are
scheduled according to requirements determined by the
front-end perspective on the proof document, while making
adequate use of the CPU resources on multicore hardware on
the back-end.
Recent refinements of Isabelle/PIDE provide an explicit
concept of asynchronous print functions over existing proof
states. This allows to integrate long-running or
potentially non-terminating tools into the document-model.
Applications range from traditional proof state output
(which may consume substantial time in interactive
development) to automated provers and dis-provers that
report on existing proof document content (e.g.
Sledgehammer, Nitpick, Quickcheck in Isabelle/HOL).
Moreover, it is possible to integrate query operations via
additional GUI panels with separate input and output (e.g.
for Sledgehammer or find-theorems). Thus the Prover IDE
provides continuous proof processing, augmented by add-on
tools that help the user to continue writing proofs. }
}
@Proceedings{ klein.ea:interactive:2014,
editor = {Gerwin Klein and Ruben Gamboa},
title = {Interactive Theorem Proving - 5th International
Conference, {ITP} 2014, Held as Part of the Vienna Summer
of Logic, {VSL} 2014, Vienna, Austria, July 14-17, 2014.
Proceedings},
series = s-lncs,
volume = 8558,
publisher = pub-springer,
year = 2014,
doi = {10.1007/978-3-319-08970-6},
isbn = {978-3-319-08969-0}
}
@InProceedings{ bezzecchi.ea:making:2018,
title = {Making Agile Development Processes fit for V-style
Certification Procedures},
author = {Bezzecchi, S. and Crisafulli, P. and Pichot, C. and Wolff,
B.},
booktitle = {{ERTS'18}},
abstract = {We present a process for the development of safety and
security critical components in transportation systems
targeting a high-level certification (CENELEC 50126/50128,
DO 178, CC ISO/IEC 15408).
The process adheres to the objectives of an ``agile
development'' in terms of evolutionary flexibility and
continuous improvement. Yet, it enforces the overall
coherence of the development artifacts (ranging from proofs
over tests to code) by a particular environment (CVCE).
In particular, the validation process is built around a
formal development based on the interactive theorem proving
system Isabelle/HOL, by linking the business logic of the
application to the operating system model, down to code and
concrete hardware models thanks to a series of refinement
proofs.
We apply both the process and its support in CVCE to a
case-study that comprises a model of an odometric service
in a railway-system with its corresponding implementation
integrated in seL4 (a secure kernel for which a
comprehensive Isabelle development exists). Novel
techniques implemented in Isabelle enforce the coherence of
semi-formal and formal definitions within to specific
certification processes in order to improve their
cost-effectiveness. },
pdf = {https://www.lri.fr/~wolff/papers/conf/2018erts-agile-fm.pdf},
year = 2018,
series = {ERTS Conference Proceedings},
location = {Toulouse}
}
@Misc{ owl2012,
title = {OWL 2 Web Ontology Language},
note = {\url{https://www.w3.org/TR/owl2-overview/}, Document
Overview (Second Edition)},
author = {World Wide Web Consortium}
}
@Misc{ protege,
title = {Prot{\'e}g{\'e}},
note = {\url{https://protege.stanford.edu}},
year = 2018
}
@Misc{ cognitum,
title = {Fluent Editor},
note = {\url{http://www.cognitum.eu/Semantics/FluentEditor/}},
year = 2018
}
@Misc{ neon,
title = {The NeOn Toolkit},
note = {\url{http://neon-toolkit.org}},
year = 2018
}
@Misc{ owlgred,
title = {OWLGrEd},
note = {\url{http://owlgred.lumii.lv/}},
year = 2018
}
@Misc{ rontorium,
title = {R Language Package for FLuent Editor (rOntorion)},
note = {\url{http://www.cognitum.eu/semantics/FluentEditor/rOntorionFE.aspx}},
year = 2018
}
@InProceedings{ DBLP:conf/mkm/BlanchetteHMN15,
author = {Jasmin Christian Blanchette and Maximilian P. L. Haslbeck
and Daniel Matichuk and Tobias Nipkow},
title = {Mining the Archive of Formal Proofs},
booktitle = {Intelligent Computer Mathematics - International
Conference, {CICM} 2015, Washington, DC, USA, July 13-17,
2015, Proceedings},
pages = {3--17},
year = 2015,
url = {https://doi.org/10.1007/978-3-319-20615-8\_1},
doi = {10.1007/978-3-319-20615-8\_1},
timestamp = {Fri, 02 Nov 2018 09:40:47 +0100},
biburl = {https://dblp.org/rec/bib/conf/mkm/BlanchetteHMN15},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InCollection{ brucker.ea:isabelle-ontologies:2018,
abstract = {While Isabelle is mostly known as part of Isabelle/HOL (an
interactive theorem prover), it actually provides a
framework for developing a wide spectrum of applications. A
particular strength of the Isabelle framework is the
combination of text editing, formal verification, and code
generation.\\\\Up to now, Isabelle's document preparation
system lacks a mechanism for ensuring the structure of
different document types (as, e.g., required in
certification processes) in general and, in particular,
mechanism for linking informal and formal parts of a
document.\\\\In this paper, we present Isabelle/DOF, a
novel Document Ontology Framework on top of Isabelle.
Isabelle/DOF allows for conventional typesetting \emph{as
well} as formal development. We show how to model document
ontologies inside Isabelle/DOF, how to use the resulting
meta-information for enforcing a certain document
structure, and discuss ontology-specific IDE support.},
address = {Heidelberg},
author = {Achim D. Brucker and Idir Ait-Sadoune and Paolo Crisafulli
and Burkhart Wolff},
booktitle = {Conference on Intelligent Computer Mathematics (CICM)},
doi = {10.1007/978-3-319-96812-4_3},
keywords = {Isabelle/Isar, HOL, Ontologies},
language = {USenglish},
location = {Hagenberg, Austria},
number = 11006,
pdf = {https://www.brucker.ch/bibliography/download/2018/brucker.ea-isabelle-ontologies-2018.pdf},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {Using the {Isabelle} Ontology Framework: Linking the
Formal with the Informal},
url = {https://www.brucker.ch/bibliography/abstract/brucker.ea-isabelle-ontologies-2018},
year = 2018
}
@InCollection{ brucker.wolff:isa_def-design-impl:2019,
abstract = {DOF is a novel framework for defining ontologies and enforcing them during document
development and evolution. A major goal of DOF is the integrated development of formal
certification documents (e.g., for Common Criteria or CENELEC 50128) that require
consistency across both formal and informal arguments. To support a consistent
development of formal and informal parts of a document, we implemented Isabelle/DOF,
an implementation of DOF on top of the formal methods framework Isabelle/HOL. A
particular emphasis is put on a deep integration into Isabelle's IDE, which allows
for smooth ontology development as well as immediate ontological feedback during
the editing of a document.
In this paper, we give an in-depth presentation of the design concepts of DOF's
Ontology Definition Language (ODL) and key aspects of the technology of its
implementation. Isabelle/DOF is the first ontology language supporting
machine-checked links between the formal and informal parts in an LCF-style
interactive theorem proving environment. Sufficiently annotated, large documents
can easily be developed collaboratively, while ensuring their consistency, and the
impact of changes (in the formal and the semi-formal content) is tracked automatically.},
address = {Heidelberg},
author = {Achim D. Brucker and Burkhart Wolff},
booktitle = {International Conference on Software Engineering and Formal Methods},
keywords = {Isabelle/Isar, HOL, Ontologies, Documentation},
language = {USenglish},
location = {Oslo, Norway},
number = {to appear},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
title = {{I}sabelle/{DOF}: {D}esign and {I}mplementation},
year = 2019
}
@InProceedings{ DBLP:conf/itp/Wenzel14,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in Isabelle/PIDE},
booktitle = {Interactive Theorem Proving (ITP)},
pages = {515--530},
year = 2014,
doi = {10.1007/978-3-319-08970-6_33},
timestamp = {Sun, 21 May 2017 00:18:59 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InProceedings{ DBLP:journals/corr/Wenzel14,
author = {Makarius Wenzel},
title = {System description: Isabelle/jEdit in 2014},
booktitle = {Proceedings Eleventh Workshop on User Interfaces for
Theorem Provers, {UITP} 2014, Vienna, Austria, 17th July
2014.},
pages = {84--94},
year = 2014,
doi = {10.4204/EPTCS.167.10},
timestamp = {Wed, 03 May 2017 14:47:58 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@InProceedings{ DBLP:conf/mkm/BarrasGHRTWW13,
author = {Bruno Barras and Lourdes Del Carmen
Gonz{\'{a}}lez{-}Huesca and Hugo Herbelin and Yann
R{\'{e}}gis{-}Gianas and Enrico Tassi and Makarius Wenzel
and Burkhart Wolff},
title = {Pervasive Parallelism in Highly-Trustable Interactive
Theorem Proving Systems},
booktitle = {Intelligent Computer Mathematics - MKM, Calculemus, DML,
and Systems and Projects},
pages = {359--363},
year = 2013,
doi = {10.1007/978-3-642-39320-4_29},
timestamp = {Sun, 04 Jun 2017 10:10:26 +0200},
biburl = {https://dblp.org/rec/bib/conf/mkm/BarrasGHRTWW13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@TechReport{ bsi:50128:2014,
type = {Standard},
key = {BS EN 50128:2011},
month = apr,
year = 2014,
series = {British Standards Publication},
title = {BS EN 50128:2011: Railway applications -- Communication,
signalling and processing systems -- Software for railway
control and protection systems},
institution = {British Standards Institution (BSI)},
keywords = {CENELEC},
abstract = {This European Standard is part of a group of related
standards. The others are EN 50126-1:1999 "Railway
applications -- The specification and demonstration of
Reliability, Availability, Maintainability and Safety
(RAMS) -- Part 1: Basic requirements and generic process"
and EN 50129:2003 "Railway applications -- Communication,
signalling and processing systems -- Safety related
electronic systems for signalling". EN 50126-1 addresses
system issues on the widest scale, while EN 50129 addresses
the approval process for individual systems which can exist
within the overall railway control and protection system.
This European Standard concentrates on the methods which
need to be used in order to provide software which meets
the demands for safety integrity which are placed upon it
by these wider considerations. This European Standard
provides a set of requirements with which the development,
deployment and maintenance of any safety-related software
intended for railway control and protection applications
shall comply. It defines requirements concerning
organisational structure, the relationship between
organisations and division of responsibility involved in
the development, deployment and maintenance activities.}
}
@TechReport{ ds:50126-1:2014,
type = {Standard},
key = {DS/EN 50126-1:1999},
month = oct,
year = 2014,
series = {Dansk standard},
title = {EN 50126-1:1999: Railway applications -- The specification
and demonstration of Reliability, Availability,
Maintainability and Safety (RAMS) -- Part 1: Basic
requirements and generic process},
institution = {Danish Standards Foundation},
keywords = {CENELEC},
abstract = {This European Standard provides Railway Authorities and
the railway support industry, throughout the European
Union, with a process which will enable the implementation
of a consistent approach to the management of reliability,
availability, maintainability and safety, denoted by the
acronym RAMS. Processes for the specification and
demonstration of RAMS requirements are cornerstones of this
standard. This European Standard aims to promote a common
understanding and approach to the management of RAMS.
This European Standard can be applied systematically by a
railway authority and railway support industry,
throughout all phases of the lifecycle of a railway
application, to develop railway specific RAMS requirements
and to achieve compliance with these requirements. The
systems-level approach defined by this European Standard
facilitates assessment of the RAMS interactions between
elements of complex railway applications. This European
Standard promotes co-operation between railway authority
and railway support industry, within a variety of
procurement strategies, in the achievement of an optimal
combination of RAMS and cost for railway applications.
Adoption of this European Standard will support the
principles of the European Single Market and facilitate
European railway inter-operability. The process defined by
this European Standard assumes that railway authorities and
railway support industry have business-level policies
addressing Quality, Performance and Safety. The approach
defined in this standard is consistent with the application
of quality management requirements contained within the ISO
9000 series of International standards.}
}
@Book{ paulson:ml:1996,
author = {Lawrence C. Paulson},
title = {{ML} for the Working Programmer},
publisher = {Cambridge University Press},
year = 1996,
url = {http://www.cl.cam.ac.uk/~lp15/MLbook/pub-details.html},
acknowledgement={none}
}
@Book{ pollak:beginning:2009,
title = {Beginning Scala},
author = {David Pollak},
publisher = {Apress},
year = 2009,
isbn = {978-1-4302-1989-7}
}
@Article{ klein:operating:2009,
author = {Gerwin Klein},
title = {Operating System Verification --- An Overview},
journal = {S\={a}dhan\={a}},
publisher = {Springer},
year = 2009,
volume = 34,
number = 1,
month = feb,
pages = {27--69},
abstract = {This paper gives a high-level introduction to the topic of
formal, interactive, machine-checked software verification
in general, and the verification of operating systems code
in particular. We survey the state of the art, the
advantages and limitations of machine-checked code proofs,
and describe two specific ongoing larger-scale verification
projects in more detail.}
}
@InProceedings{ wenzel:system:2014,
author = {Makarius Wenzel},
title = {System description: Isabelle/jEdit in 2014},
booktitle = {Workshop on User Interfaces for Theorem Provers, {UITP}},
pages = {84--94},
year = 2014,
doi = {10.4204/EPTCS.167.10},
timestamp = {Wed, 12 Sep 2018 01:05:15 +0200},
editor = {Christoph Benzm{\"{u}}ller and Bruno {Woltzenlogel Paleo}},
volume = 167
}
@InProceedings{ feliachi.ea:circus:2013,
author = {Abderrahmane Feliachi and Marie{-}Claude Gaudel and
Makarius Wenzel and Burkhart Wolff},
title = {The Circus Testing Theory Revisited in Isabelle/HOL},
booktitle = {{ICFEM}},
series = {Lecture Notes in Computer Science},
volume = 8144,
pages = {131--147},
publisher = {Springer},
year = 2013
}
@Article{ Klein2014,
author = {Gerwin Klein and June Andronick and Kevin Elphinstone and
Toby C. Murray and Thomas Sewell and Rafal Kolanski and
Gernot Heiser},
title = {Comprehensive formal verification of an {OS} microkernel},
journal = {{ACM} Trans. Comput. Syst.},
year = 2014,
volume = 32,
number = 1,
pages = {2:1--2:70},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/tocs/KleinAEMSKH14},
doi = {10.1145/2560537},
timestamp = {Tue, 03 Jan 2017 11:51:57 +0100},
url = {http://doi.acm.org/10.1145/2560537}
}
@InProceedings{ bicchierai.ea:using:2013,
author = {Bicchierai, Irene and Bucci, Giacomo and Nocentini, Carlo
and Vicario, Enrico},
editor = {Keller, Hubert B. and Pl{\"o}dereder, Erhard and Dencker,
Peter and Klenk, Herbert},
title = {Using Ontologies in the Integration of Structural,
Functional, and Process Perspectives in the Development of
Safety Critical Systems},
booktitle = {Reliable Software Technologies -- Ada-Europe 2013},
year = 2013,
publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg},
pages = {95--108},
abstract = {We present a systematic approach for the efficient
management of the data involved in the development process
of safety critical systems, illustrating how the activities
performed during the life-cycle can be integrated in a
common framework. Information needed in these activities
reflects concepts that pertain to three different
perspectives: i) structural elements of design and
implementation; ii) functional requirements and quality
attributes; iii) organization of the overall process. The
integration of these concepts may considerably improve the
trade-off between reward and effort spent in verification
and quality-driven activities.},
isbn = {978-3-642-38601-5}
}
@Article{ zhao.ea:formal:2016,
author = {Yongwang Zhao and David San{\'{a}}n and Fuyuan Zhang and
Yang Liu},
title = {Formal Specification and Analysis of Partitioning
Operating Systems by Integrating Ontology and Refinement},
journal = {{IEEE} Trans. Industrial Informatics},
volume = 12,
number = 4,
pages = {1321--1331},
year = 2016,
abstract = {Partitioning operating systems (POSs) have been widely
applied in safety-critical domains from aerospace to
automotive. In order to improve the safety and the
certification process of POSs, the ARINC 653 standard has
been developed and complied with by the mainstream POSs.
Rigorous formalization of ARINC 653 can reveal hidden
errors in this standard and provide a necessary foundation
for formal verification of POSs and ARINC 653 applications.
For the purpose of reusability and efficiency, a
novel methodology by integrating ontology and refinement is
proposed to formally specify and analyze POSs in this
paper. An ontology of POSs is developed as an intermediate
model between informal descriptions of ARINC 653 and the
formal specification in Event-B. A semiautomatic
translation from the ontology and ARINC 653 into Event-B is
implemented, which leads to a complete Event-B
specification for ARINC 653 compliant POSs. During the
formal analysis, six hidden errors in ARINC 653 have been
discovered and fixed in the Event-B specification. We also
validate the existence of these errors in two open-source
POSs, i.e., XtratuM and POK. By introducing the ontology,
the degree of automatic verification of the Event-B
specification reaches a higher level.}
}
@InProceedings{ denney.ea:evidence:2013,
author = {E. {Denney} and G. {Pai}},
booktitle = {2013 IEEE International Symposium on Software Reliability
Engineering Workshops (ISSREW)},
title = {Evidence arguments for using formal methods in software
certification},
year = 2013,
pages = {375--380},
abstract = {We describe a generic approach for automatically
integrating the output generated from a formal method/tool
into a software safety assurance case, as an evidence
argument, by (a) encoding the underlying reasoning as a
safety case pattern, and (b) instantiating it using the
data produced from the method/tool. We believe this
approach not only improves the trustworthiness of the
evidence generated from a formal method/tool, by explicitly
presenting the reasoning and mechanisms underlying its
genesis, but also provides a way to gauge the suitability
of the evidence in the context of the wider assurance case.
We illustrate our work by application to a real example, an
unmanned aircraft system, where we invoke a formal code
analysis tool from its autopilot software safety case,
automatically transform the verification output into an
evidence argument, and then integrate it into the former.},
keywords = {aircraft;autonomous aerial vehicles;formal
verification;safety-critical software;evidence
arguments;formal methods;software certification;software
safety assurance case;safety case pattern;unmanned aircraft
system;formal code analysis;autopilot software safety
case;verification output;Safety;Software
safety;Cognition;Computer
architecture;Context;Encoding;Safety cases;Safety case
patterns;Formal methods;Argumentation;Software
certification},
doi = {10.1109/ISSREW.2013.6688924},
month = {Nov}
}
@InProceedings{ kaluvuri.ea:quantitative:2014,
author = {Kaluvuri, Samuel Paul and Bezzi, Michele and Roudier,
Yves},
editor = {Eckert, Claudia and Katsikas, Sokratis K. and Pernul,
G{\"u}nther},
title = {A Quantitative Analysis of Common Criteria Certification
Practice},
booktitle = {Trust, Privacy, and Security in Digital Business},
year = 2014,
publisher = {Springer International Publishing},
address = {Cham},
pages = {132--143},
abstract = {The Common Criteria (CC) certification framework defines a
widely recognized, multi-domain certification scheme that
aims to provide security assurances about IT products to
consumers. However, the CC scheme does not prescribe a
monitoring scheme for the CC practice, raising concerns
about the quality of the security assurance provided by the
certification and questions on its usefulness. In this
paper, we present a critical analysis of the CC practice
that concretely exposes the limitations of current
approaches. We also provide directions to improve the CC
practice.},
isbn = {978-3-319-09770-1}
}
@InProceedings{ ekelhart.ea:ontological:2007,
author = {Ekelhart, Andreas and Fenz, Stefan and Goluch, Gernot and
Weippl, Edgar},
editor = {Venter, Hein and Eloff, Mariki and Labuschagne, Les and
Eloff, Jan and von Solms, Rossouw},
title = {Ontological Mapping of Common Criteria's Security
Assurance Requirements},
booktitle = {New Approaches for Security, Privacy and Trust in Complex
Environments},
year = 2007,
publisher = {Springer US},
address = {Boston, MA},
pages = {85--95},
abstract = {The Common Criteria (CC) for Information Technology
Security Evaluation provides comprehensive guidelines for
the evaluation and certification of IT security regarding
data security and data privacy. Due to the very complex
and time-consuming certification process a lot of companies
abstain from a CC certification. We created the CC Ontology
tool, which is based on an ontological representation
of the CC catalog, to support the evaluator at the
certification process. Tasks such as the planning of an
evaluation process, the review of relevant documents or the
creating of reports are supported by the CC Ontology
tool. With the development of this tool we reduce the time
and costs needed to complete a certification.},
isbn = {978-0-387-72367-9}
}
@InProceedings{ fenz.ea:formalizing:2009,
author = {Fenz, Stefan and Ekelhart, Andreas},
title = {Formalizing Information Security Knowledge},
booktitle = {Proceedings of the 4th International Symposium on
Information, Computer, and Communications Security},
series = {ASIACCS '09},
year = 2009,
isbn = {978-1-60558-394-5},
location = {Sydney, Australia},
pages = {183--194},
numpages = 12,
url = {http://doi.acm.org/10.1145/1533057.1533084},
doi = {10.1145/1533057.1533084},
acmid = 1533084,
publisher = {ACM},
address = {New York, NY, USA},
keywords = {information security, risk management, security ontology},
abstract = {Unified and formal knowledge models of the information
security domain are fundamental requirements for supporting
and enhancing existing risk management approaches. This
paper describes a security ontology which provides an
ontological structure for information security domain
knowledge. Besides existing best-practice guidelines such
as the German IT Grundschutz Manual also concrete knowledge
of the considered organization is incorporated. An
evaluation conducted by an information security expert team
has shown that this knowledge model can be used to support
a broad range of information security risk management
approaches.}
}
@InProceedings{ gleirscher.ea:incremental:2007,
author = {M. {Gleirscher} and D. {Ratiu} and B. {Schatz}},
booktitle = {2007 International Conference on Systems Engineering and
Modeling},
title = {Incremental Integration of Heterogeneous Systems Views},
year = 2007,
pages = {50--59},
abstract = {To master systems complexity, their industrial development
requires specialized heterogeneous views and techniques and
- correspondingly - engineering tools. These views
generally cover only parts of the system under development,
and critical development defects often occur at the gaps
between them. To successfully achieve an integration that
bridges these gaps, we must tackle it both from the
methodical as well as from the tooling sides. The former
requires answers to questions like: What are the views
provided by the tools? How are they related and extended to
achieve consistency or to form new views? - while the
latter requires answers to: How are views extracted from
the tools? How are they composed and provided to the user?
Our approach, suitable for incremental integration, is
demonstrated in the tool integration framework ToolNet.},
keywords = {computer aided engineering;computer aided software
engineering;software tools;heterogeneous systems
views;systems complexity;tool integration
framework;ToolNet;engineering tools;Systems engineering and
theory;Certification;Integrated circuit
modeling;Bridges;Software tools;Computer aided software
engineering;Computer aided engineering;Costs;Natural
languages;Formal specifications},
doi = {10.1109/ICSEM.2007.373334},
month = {March}
}
@Booklet{ omg:sacm:2018,
bibkey = {omg:sacm:2018},
key = omg,
abstract = {This specification defines a metamodel for representing
structured assurance cases. An Assurance Case is a set of
auditable claims, arguments, and evidence created to
support the claim that a defined system/service will
satisfy the particular requirements. An Assurance Case is a
document that facilitates information exchange between
various system stakeholders such as suppliers and
acquirers, and between the operator and regulator, where
the knowledge related to the safety and security of the
system is communicated in a clear and defendable way. Each
assurance case should communicate the scope of the system,
the operational context, the claims, the safety and/or
security arguments, along with the corresponding
evidence.},
publisher = omg,
language = {USenglish},
month = mar,
keywords = {SACM},
topic = {formalism},
note = {Available as OMG document
\href{http://www.omg.org/cgi-bin/doc?formal/2018-02-02}
{formal/2018-02-02}},
public = {yes},
title = {Structured Assurance Case Metamodel (SACM)},
year = 2018
}
@InProceedings{ kelly.ea:goal:2004,
title = {The Goal Structuring Notation -- A Safety Argument
Notation},
booktitle = {Dependable Systems and Networks},
year = 2004,
month = jul,
author = {Tim Kelly and Rob Weaver}
}
@TechReport{ rushby:formal:1993,
author = {John Rushby},
title = {Formal Methods and the Certification of Critical Systems},
institution = {Computer Science Laboratory, SRI International},
year = 1993,
number = {SRI-CSL-93-7},
address = {Menlo Park, CA},
note = {Also issued under the title {\em Formal Methods and
Digital Systems Validation for Airborne Systems\/} as NASA
Contractor Report 4551, December 1993},
month = dec
}
@InProceedings{ greenaway.ea:bridging:2012,
author = {Greenaway, David and Andronick, June and Klein, Gerwin},
editor = {Beringer, Lennart and Felty, Amy},
title = {Bridging the Gap: Automatic Verified Abstraction of C},
booktitle = {Interactive Theorem Proving},
year = 2012,
publisher = {Springer Berlin Heidelberg},
address = {Berlin, Heidelberg},
pages = {99--115},
abstract = {Before low-level imperative code can be reasoned about in
an interactive theorem prover, it must first be converted
into a logical representation in that theorem prover.
Accurate translations of such code should be conservative,
choosing safe representations over representations
convenient to reason about. This paper bridges the gap
between conservative representation and convenient
reasoning. We present a tool that automatically abstracts
low-level C semantics into higher level specifications,
while generating proofs of refinement in Isabelle/HOL for
each translation step. The aim is to generate a verified,
human-readable specification, convenient for further
reasoning.},
isbn = {978-3-642-32347-8}
}

View File

@ -1,5 +0,0 @@
heading_prefix "{\\large\\textbf{"
heading_suffix "}\\hfil}\\nopagebreak\n"
headings_flag 1
symhead_positive "Symbols"

View File

@ -1,675 +0,0 @@
(*************************************************************************
* Copyright (C)
* 2019 The University of Exeter
* 2018-2019 The University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<*)
theory
mini_odo
imports
"Isabelle_DOF-Ontologies.CENELEC_50128"
"Isabelle_DOF.technical_report"
"Physical_Quantities.SI" "Physical_Quantities.SI_Pretty"
begin
use_template "scrreprt-modern"
use_ontology technical_report and "Isabelle_DOF-Ontologies.CENELEC_50128"
declare[[strict_monitor_checking=true]]
define_shortcut* dof \<rightleftharpoons> \<open>\dof\<close>
isadof \<rightleftharpoons> \<open>\isadof{}\<close>
(*>*)
title*[title::title]\<open>The CENELEC 50128 Ontology\<close>
subtitle*[subtitle::subtitle]\<open>Case Study: An Odometer-Subsystem\<close>
chapter*[casestudy::technical]\<open>An Odometer-Subsystem\<close>
text\<open>
In our case study, we will follow the phases of analysis, design, and implementation of the
odometry function of a train. This \<^cenelec_term>\<open>SF\<close> processes data from an odometer to compute
the position, speed, and acceleration of a train. This system provides the basis for many
safety critical decisions, \<^eg>, the opening of the doors. Due to its relatively small size, it
is a manageable, albeit realistic target for a comprehensive formal development: it covers a
physical model of the environment, the physical and architectural model of the odometer,
but also the \<^cenelec_term>\<open>SFRS\<close> aspects including the problem of numerical sampling and the
boundaries of efficient computations. The interplay between environment and measuring-device as
well as the implementation problems on a platform with limited resources makes the odometer a
fairly typical \<^cenelec_term>\<open>safety\<close> critical \<^cenelec_term>\<open>component\<close> of an embedded system.
The case-study is presented in the form of an \<^emph>\<open>integrated source\<close> in \<^isadof> containing all four
reports from the phases:
\<^item> \<^term>\<open>software_requirements\<close> with deliverable \<^doc_class>\<open>SWRS\<close>
(or long: \<^typ>\<open>software_requirements_specification\<close>(-report))
\<^item> \<^term>\<open>software_architecture_and_design\<close> with deliverable \<^doc_class>\<open>SWDS\<close>
(or long: \<^typ>\<open>software_design_specification\<close>(-report))
\<^item> \<^term>\<open>software_component_design\<close> with deliverable \<^doc_class>\<open>SWCDVR\<close>
(or long: \<^typ>\<open>software_component_design_verification\<close>(-report))
\<^item> \<^term>\<open>component_implementation_and_testing\<close> with deliverable \<^doc_class>\<open>SWADVR\<close>
(or long: \<^typ>\<open>software_architecture_and_design_verification\<close>(-report))
The objective of this case study is to demonstrate deep-semantical ontologies in
software developments targeting certifications, and in particular, how \<^isadof>'s
integrated source concept permits assuring \<^cenelec_term>\<open>traceability\<close>.
\<^bold>\<open>NOTE\<close> that this case study has aspects that are actually covered by CENELEC 50126,
the 'systems'-counterpart covering hardware aspects. Recall that CENELEC 50128 covers
software.
Due to space reasons, we will focus on the analysis part of the integrated
document; the design and code parts will only be outlined in a final resume. The
\<^emph>\<open>ontological embedding\<close>, which represents a main contribution of this paper, will be presented
in the next two sections.
We start with the capture of a number of informal documents available at the beginning of the
development.
\<close>
section\<open>A CENELEC-conform development as an \<^emph>\<open>Integrated Source\<close>\<close>
text\<open>Accurate information about a train's location along a track is an important prerequisite
for safe railway operation. Position, speed, and acceleration measurement usually relies on a
set of independent measurements based on different physical principles---as a way to enhance
precision and availability. One of them is an \<^emph>\<open>odometer\<close>, which allows estimating a relative
location while the train runs, starting from positions established by other measurements. \<close>
subsection\<open>Capturing ``Basic Principles of Motion and Motion Measurement.''\<close>
text\<open>
A rotary encoder measures the motion of a train. To achieve this, the encoder's shaft is fixed to
the train's wheel axle. When the train moves, the encoder produces a signal pattern directly
related to the train's progress. By measuring the fractional rotation of the encoder's shaft and
considering the wheel's effective ratio, the relative movement of the train can be calculated.
\begin{wrapfigure}[8]{l}{4.6cm}
\centering
\vspace{-.5cm}
\includegraphics[width=3.4cm]{figures/wheel-df}
\caption{Motion sensing via an odometer.}
\label{wheel-df}
\end{wrapfigure}
\autoref{wheel-df} shows that we model a train, seen from a pure kinematics standpoint, as a physical
system characterized by a one-dimensional continuous distance function, which represents the
observable of the physical system. Concepts like speed and acceleration are derived concepts
defined as its (gradient) derivatives. We assume the use of the meter, kilogram, and second
(MKS) system.
This model is already based on several fundamental assumptions relevant for the correct
functioning of the system and for its integration into the system as a whole. In
particular, we need to make the following assumptions explicit: \<^vs>\<open>-0.3cm\<close>\<close>
text*["perfect_wheel"::assumption]
\<open>\<^item> the wheel is perfectly circular with a given, constant radius. \<^vs>\<open>-0.3cm\<close>\<close>
text*["no_slip"::assumption]
\<open>\<^item> the slip between the train's wheel and the track is negligible. \<^vs>\<open>-0.3cm\<close>\<close>
text*["constant_teeth_dist"::assumption]
\<open>\<^item> the distance between all teeth of a wheel is the same and constant, and \<^vs>\<open>-0.3cm\<close>\<close>
text*["constant_sampling_rate"::assumption]
\<open>\<^item> the sampling rate of positions is a given constant.\<close>
text\<open>
These assumptions have to be traced throughout the certification process as
\<^emph>\<open>derived requirements\<close> (or, in CENELEC terminology, as \<^emph>\<open>exported constraints\<close>), which is
also reflected by their tracing throughout the body of certification documents. This may result
in operational regulations, \<^eg>, regular checks for tolerable wheel defects. As for the
\<^emph>\<open>no slip\<close>-assumption, this leads to the modeling of constraints under which physical
slip can be neglected: the device can only produce reliable results under certain physical
constraints (speed and acceleration limits). Moreover, the \<^emph>\<open>no slip\<close>-assumption motivates
architectural arrangements for situations where this assumption cannot be assured (as is the
case, for example, of an emergency braking) together with error-detection and error-recovery.
\<close>
subsection\<open>Capturing ``System Architecture.''\<close>
figure*["three_phase"::figure,relative_width="70",file_src="''figures/three-phase-odo.pdf''"]
\<open>An odometer with three sensors \<open>C1\<close>, \<open>C2\<close>, and \<open>C3\<close>.\<close>
text\<open>
The requirements analysis also includes a document \<^doc_class>\<open>SYSAD\<close>
(\<^typ>\<open>system_architecture_description\<close>) that contains a technical drawing of the odometer,
a timing diagram (see \<^figure>\<open>three_phase\<close>), and tables describing the encoding of the position
for the possible signal transitions of the sensors \<open>C1\<close>, \<open>C2\<close>, and \<open>C3\<close>.
\<close>
subsection\<open>Capturing ``System Interfaces.''\<close>
text\<open>
The requirements analysis also contains a sub-document \<^doc_class>\<open>FnI\<close> (\<^typ>\<open>functions_and_interfaces\<close>)
describing the technical format of the output of the odometry function.
This section, \<^eg>, specifies the output \<^emph>\<open>speed\<close> as given by an \<^verbatim>\<open>int_32\<close> to be the
``Estimation of the speed (in mm/sec) evaluated over the latest \<open>N\<^sub>a\<^sub>v\<^sub>g\<close> samples''
where the speed refers to the physical speed of the train and \<open>N\<^sub>a\<^sub>v\<^sub>g\<close> is a parameter of the
sub-system configuration. \<close>
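text\<open>
As a minimal sketch (not part of the original interface document; the estimator and the names
\<open>pos\<close>, \<open>\<delta>s\<close>, and \<open>\<delta>t\<close> are invented for illustration), such a speed output can be phrased over
a position-count sequence, the odometer resolution, and the sampling interval:
@{theory_text [display]\<open>
definition speed_estimate :: "(nat \<Rightarrow> int) \<Rightarrow> real \<Rightarrow> real \<Rightarrow> nat \<Rightarrow> nat \<Rightarrow> real"
  where "speed_estimate pos \<delta>s \<delta>t N\<^sub>a\<^sub>v\<^sub>g n \<equiv>
           (real_of_int (pos n - pos (n - N\<^sub>a\<^sub>v\<^sub>g)) * \<delta>s) / (real N\<^sub>a\<^sub>v\<^sub>g * \<delta>t)"
\<close>}
\<close>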
(*<*)
declare_reference*["df_numerics_encshaft"::figure]
(*>*)
subsection\<open>Capturing ``Required Performances.''\<close>
text\<open>
The given analysis document is relatively implicit on the expected precision of the measurements;
however, certain interface parameters like \<open>Odometric_Position_TimeStamp\<close>
(a counter on the number of samplings) and \<open>Relative_Position\<close> are defined as
unsigned 32-bit integers. These definitions imply exported constraints concerning the acceptable
time of service as well as the maximum distance before a necessary reboot of the subsystem.
For our case-study, we assume a maximum deviation of the \<open>Relative_Position\<close> from the
theoretical distance.
The requirement analysis document describes the physical environment, the architecture
of the measuring device, and the required format and precision of the measurements of the odometry
function as represented (see @{figure (unchecked) "df_numerics_encshaft"}).\<close>
figure*["df_numerics_encshaft"::figure,relative_width="76",file_src="''figures/df-numerics-encshaft.png''"]
\<open>Real distance vs. discrete distance vs. shaft-encoder sequence\<close>
subsection\<open>Capturing the ``Software Design Spec'' (Resume).\<close>
text\<open>
The design provides a function that manages an internal first-in-first-out buffer of
shaft-encodings and corresponding positions. Central to the design is a step-function analyzing
new incoming shaft encodings, checking them, propagating two kinds of error-states (one allowing
recovery, the other one fatal, signaling, \<^eg>, a defect of the receiver hardware), and
calculating the relative position, speed, and acceleration.
\<close>
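text\<open>
A hedged sketch of such a step-function interface (type and constant names invented for
illustration; the actual design model is part of the hidden integrated source):
@{theory_text [display]\<open>
datatype error_state = No_Error | Recoverable_Error | Fatal_Error
record odo_state =
  buffer   :: "shaft_encoder_state list"  (* FIFO of recent encodings *)
  position :: nat                         (* relative position count *)
  status   :: error_state
consts odo_step :: "shaft_encoder_state \<Rightarrow> odo_state \<Rightarrow> odo_state"
\<close>}
\<close>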
subsection\<open>Capturing the ``Software Implementation'' (Resume).\<close>
text\<open>
While the design is executable on a Linux system, it turns out that the code generated from an
Isabelle model is neither executable on the resource-constrained target platform, an ARM-based
Sabre-light card, nor certifiable, since the compilation chain via ML to C implies the
inclusion of a run-time system and quite complex libraries.
We therefore adopted an approach similar to the one used in the seL4 project~@{cite "Klein2014"}: we use a
hand-written implementation in C and verify it via
AutoCorres~@{cite "greenaway.ea:bridging:2012"} against
the design model. The hand-written C-source is integrated into Isabelle/HOL technically by
registering it in the build-configuration and logically by a trusted C-to-HOL compiler included
in AutoCorres.
\<close>
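text\<open>
A hedged sketch of this integration step (the file name \<^verbatim>\<open>odometry.c\<close> is invented;
\<^verbatim>\<open>install_C_file\<close> and \<^verbatim>\<open>autocorres\<close> are the commands of the C-parser and
AutoCorres tool-chain):
@{verbatim [display] \<open>install_C_file "odometry.c"
autocorres "odometry.c"\<close>}
\<close>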
(*<*)
definition teeth_per_wheelturn::nat ("tpw") where "tpw \<equiv> SOME x. x > 0"
definition wheel_diameter ::"real[m]" ("w\<^sub>d") where "w\<^sub>d \<equiv> SOME x. x > 0"
definition wheel_circumference::"real[m]" ("w\<^sub>0") where "w\<^sub>0 \<equiv> pi *\<^sub>Q w\<^sub>d"
definition \<delta>s\<^sub>r\<^sub>e\<^sub>s ::"real[m]" where "\<delta>s\<^sub>r\<^sub>e\<^sub>s \<equiv> 1 / (2 * 3 * tpw) *\<^sub>Q w\<^sub>0 "
(*>*)
section\<open>Formal Enrichment of the Software Requirements Specification\<close>
text\<open>
After the \<^emph>\<open>capture\<close>-phase, where we converted/integrated existing informal analysis and design
documents as well as code into an integrated Isabelle document, we entered the phase of
\<open>formal enrichment\<close>. For example, from the assumptions in the architecture follow
the definitions:
@{theory_text [display]\<open>
definition teeth_per_wheelturn::nat ("tpw") where "tpw \<equiv> SOME x. x > 0"
definition wheel_diameter::"real[m]" ("w\<^sub>d") where "w\<^sub>d \<equiv> SOME x. x > 0"
definition wheel_circumference::"real[m]" ("w\<^sub>0") where "w\<^sub>0 \<equiv> pi *\<^sub>Q w\<^sub>d"
definition \<delta>s\<^sub>r\<^sub>e\<^sub>s::"real[m]" where "\<delta>s\<^sub>r\<^sub>e\<^sub>s \<equiv> 1 / (2 * 3 * tpw) *\<^sub>Q w\<^sub>0 "
\<close>}
Here, \<open>real\<close> refers to the real numbers as defined in the HOL-Analysis library, which provides
concepts such as Cauchy Sequences, limits, differentiability, and a very substantial part of
classical Calculus. \<open>SOME\<close> is the Hilbert choice operator from HOL; the definitions of the
model parameters admit all possible positive values as uninterpreted constants. Our
\<^assumption>\<open>perfect_wheel\<close> is translated into a calculation of the circumference of the
wheel, while \<open>\<delta>s\<^sub>r\<^sub>e\<^sub>s\<close>, the resolution of the odometer, can be calculated
from these parameters. HOL-Analysis permits formalizing the fundamental physical observables:
\<close>
(*<*)
type_synonym distance_function = "real[s] \<Rightarrow> real[m]"
consts Speed::"distance_function \<Rightarrow> real[s] \<Rightarrow> real[m\<cdot>s\<^sup>-\<^sup>1]"
consts Accel::"distance_function \<Rightarrow> real[s] \<Rightarrow> real[m\<cdot>s\<^sup>-\<^sup>2]"
consts Speed\<^sub>M\<^sub>a\<^sub>x::"real[m\<cdot>s\<^sup>-\<^sup>1]"
(* Common non-SI-conformant abbreviations *)
definition "kmh \<equiv> kilo *\<^sub>Q metre \<^bold>/ hour :: 'a::{field,ring_char_0}[m\<cdot>s\<^sup>-\<^sup>1]"
definition "kHz \<equiv> kilo *\<^sub>Q hertz :: 'a::{field,ring_char_0}[s\<^sup>-\<^sup>1]"
(*>*)
text\<open>
@{theory_text [display]\<open>
type_synonym distance_function = "real[s]\<Rightarrow>real[m]"
definition Speed::"distance_function\<Rightarrow>real\<Rightarrow>real" where "Speed f \<equiv> deriv f"
definition Accel::"distance_function\<Rightarrow>real\<Rightarrow>real" where "Accel f \<equiv> deriv (deriv f)"
\<close>}
which permits constraining the central observable \<open>distance_function\<close> in a
way that describes the space of ``normal behavior'' where we expect the odometer to produce
reliable measurements over a \<open>distance_function df\<close>.
The essence of the physics of the train is covered by the following definition:
@{theory_text [display]\<open>
definition normally_behaved_distance_function :: "(real \<Rightarrow> real) \<Rightarrow> bool"
  where normally_behaved_distance_function df =
          ( \<forall> t. df(t) \<in> \<real>\<^sub>\<ge>\<^sub>0 \<and> (\<forall> t \<in> \<real> - \<real>\<^sub>\<ge>\<^sub>0. df(t) = 0)
            \<and> df differentiable on \<real>\<^sub>\<ge>\<^sub>0 \<and> (Speed df) differentiable on \<real>\<^sub>\<ge>\<^sub>0
            \<and> (Accel df) differentiable on \<real>\<^sub>\<ge>\<^sub>0
            \<and> (\<forall> t. (Speed df) t \<in> {Speed\<^sub>M\<^sub>i\<^sub>n .. Speed\<^sub>M\<^sub>a\<^sub>x})
            \<and> (\<forall> t. (Accel df) t \<in> {Accel\<^sub>M\<^sub>i\<^sub>n .. Accel\<^sub>M\<^sub>a\<^sub>x}))
\<close>}
which constrains the distance functions to the bounds described in the informal descriptions and
states them as three-fold differentiable functions within certain bounds concerning speed and
acceleration. Note that violations, in particular of the constraints on speed and acceleration,
\<^emph>\<open>do\<close> occur in practice. In such cases, the global system adapts recovery strategies that are out
of the scope of our model. Concepts like \<open>shaft_encoder_state\<close> (a triple with the sensor values
\<open>C1\<close>, \<open>C2\<close>, \<open>C3\<close>) were formalized as types, while tables were
defined as recursive functions:
@{theory_text [display]\<open>
fun phase\<^sub>0 :: "nat \<Rightarrow> shaft_encoder_state" where
"phase\<^sub>0 (0) = \<lparr> C1 = False, C2 = False, C3 = True \<rparr>"
|"phase\<^sub>0 (1) = \<lparr> C1 = True, C2 = False, C3 = True \<rparr>"
|"phase\<^sub>0 (2) = \<lparr> C1 = True, C2 = False, C3 = False\<rparr>"
|"phase\<^sub>0 (3) = \<lparr> C1 = True, C2 = True, C3 = False\<rparr>"
|"phase\<^sub>0 (4) = \<lparr> C1 = False, C2 = True, C3 = False\<rparr>"
|"phase\<^sub>0 (5) = \<lparr> C1 = False, C2 = True, C3 = True \<rparr>"
|"phase\<^sub>0 x = phase\<^sub>0(x - 6)"
definition Phase ::"nat\<Rightarrow>shaft_encoder_state" where Phase(x) = phase\<^sub>0(x-1)
\<close>}
We now define shaft encoder sequences as translations of distance functions:
@{theory_text [display]\<open>
definition encoding::"distance_function\<Rightarrow>nat\<Rightarrow>real\<Rightarrow>shaft_encoder_state"
where "encoding df init\<^sub>p\<^sub>o\<^sub>s \<equiv> \<lambda>x. Phase(nat\<lfloor>df(x) / \<delta>s\<^sub>r\<^sub>e\<^sub>s\<rfloor> + init\<^sub>p\<^sub>o\<^sub>s)"
\<close>}
where \<open>init\<^sub>p\<^sub>o\<^sub>s\<close> is the initial position of the wheel.
\<open>sampling\<close>'s were constructed from encoding sequences over discretized time points:
@{theory_text [display]\<open>
definition sampling::"distance_function\<Rightarrow>nat\<Rightarrow>real\<Rightarrow>nat\<Rightarrow>shaft_encoder_state"
where "sampling df init\<^sub>p\<^sub>o\<^sub>s \<delta>t \<equiv> \<lambda>n::nat. encoding df initinit\<^sub>p\<^sub>o\<^sub>s (n * \<delta>t)"
\<close>}
where \<open>\<delta>t\<close> is the sampling interval, a parameter of the configuration of the system.
Finally, we can formally define the required performances. From the interface description
and the global model parameters such as wheel diameter, the number of teeth per wheel, the
sampling frequency etc., we can infer the maximal time of service as well the maximum distance
the device can measure. As an example configuration, choosing:
\<^item> \<^term>\<open>(1 *\<^sub>Q metre):: real[m]\<close> for \<^term>\<open>w\<^sub>d\<close> (wheel-diameter),
\<^item> \<^term>\<open>100 :: real\<close> for \<^term>\<open>tpw\<close> (teeth per wheel),
\<^item> \<^term>\<open>80 *\<^sub>Q kmh :: real[m\<cdot>s\<^sup>-\<^sup>1]\<close> for \<^term>\<open>Speed\<^sub>M\<^sub>a\<^sub>x\<close>,
\<^item> \<^term>\<open>14.4 *\<^sub>Q kHz :: real[s\<^sup>-\<^sup>1]\<close> for the sampling frequency,
results in an odometer resolution of \<^term>\<open>2.3 *\<^sub>Q milli *\<^sub>Q metre\<close>, a maximum distance of
\<^term>\<open>9878 *\<^sub>Q kilo *\<^sub>Q metre\<close>, and a maximal system up-time of \<^term>\<open>123.4 *\<^sub>Q hour\<close>s.
The required precision of an odometer can be defined by a constant describing
the maximally allowed difference between \<open>df(n*\<delta>t)\<close> and
\<open>sampling df init\<^sub>p\<^sub>o\<^sub>s \<delta>t n\<close> for all \<open>init\<^sub>p\<^sub>o\<^sub>s \<in>{0..5}\<close>.
\<close>
(*<*)
ML\<open>(* Sanity arithmetic for the example configuration: 2^32 position counts,
      2.3 mm resolution, and 80 km/h maximal speed. *)
   val two_thirty2 = 1024 * 1024 * 1024 * 4;             (* 2^32 samples *)
   val dist_max = 0.0023 * (real two_thirty2) / 1000.0;  (* maximal distance in km *)
   val dist_h = dist_max / 80.0  (* maximal up-time in hours at 80 km/h *)\<close>
(*>*)
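(*<*)
(* A hedged sanity-check sketch (not in the original document): at the example
   configuration, the distance covered during one sampling step at maximal speed
   stays below the odometer resolution, i.e. \<delta>t * Speed\<^sub>M\<^sub>a\<^sub>x < \<delta>s\<^sub>r\<^sub>e\<^sub>s. *)
ML\<open>
  val speed_max = 80.0 * 1000.0 / 3600.0;  (* 80 km/h in m/s *)
  val delta_t = 1.0 / 14400.0;             (* sampling interval at 14.4 kHz, in s *)
  val step = delta_t * speed_max;          (* distance per sample, ca. 0.00154 m *)
  val below_resolution = step < 0.0023;    (* stays below the 2.3 mm resolution *)
\<close>
(*>*)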
section*[verific::technical]\<open>Verification of the Software Requirements Specification\<close>
text\<open>The original documents already contained various statements that motivate certain safety
properties of the device. For example, the \<open>Phase\<close>-table excludes situations in which
the sensors \<open>C1\<close>, \<open>C2\<close>, and \<open>C3\<close> are all ``off'' or situations in
which all sensors are ``on,'' reflecting a physical or electrical error in the odometer. It can be
shown by a very small Isabelle case-distinction proof that this safety requirement follows indeed
from the above definitions:
@{theory_text [display]\<open>
lemma Encoder_Property_1:(C1(Phase x) \<and> C2(Phase x) \<and> C3(Phase x))=False
proof (cases x)
case 0 then show ?thesis by (simp add: Phase_def)
next
case (Suc n) then show ?thesis
by(simp add: Phase_def,rule_tac n = n in cycle_case_split,simp_all)
qed
\<close>}
for all positions \<open>x\<close>. Similarly, it is proved that the table is indeed cyclic:
\<open>phase\<^sub>0 x = phase\<^sub>0(x mod 6)\<close>
and locally injective:
\<open>\<forall>x<6. \<forall>y<6. phase\<^sub>0 x = phase\<^sub>0 y \<longrightarrow> x = y\<close>
These lemmas, building the ``theory of an odometer,'' culminate in a theorem
that we would like to present in more detail.
@{theory_text [display]\<open>
theorem minimal_sampling :
assumes * : normally_behaved_distance_function df
and ** : \<delta>t * Speed\<^sub>M\<^sub>a\<^sub>x < \<delta>s\<^sub>r\<^sub>e\<^sub>s
shows \<forall> \<delta>X\<le>\<delta>t. 0<\<delta>X \<longrightarrow>
\<exists>f. retracting (f::nat\<Rightarrow>nat) \<and>
sampling df init\<^sub>p\<^sub>o\<^sub>s \<delta>X = (sampling df init\<^sub>p\<^sub>o\<^sub>s \<delta>t) o f
\<close>}
This theorem states for \<open>normally_behaved_distance_function\<close>s that there is
a minimal sampling frequency assuring the safety of the measurements; samplings of
some \<open>df\<close> gained at this minimal sampling frequency can be ``pumped up''
to samplings at higher sampling frequencies; the latter do not contain more information.
Of particular interest is the second assumption, labelled ``\<open>**\<close>,'' which
establishes a lower bound from \<open>w\<^sub>0\<close>, \<open>tpw\<close>, and
\<open>Speed\<^sub>M\<^sub>a\<^sub>x\<close> for the sampling frequency. Methodologically, this represents
an exported constraint that cannot be represented \<^emph>\<open>inside\<close> the design model: it means that the
computations have to be fast enough on the computing platform in order to assure that the
calculations are valid. It was in particular this exported constraint that forced us to give up
the original plan to generate the code from the design model and to execute it directly on the
target platform.
For our example configuration (1m diameter, 100 teeth per wheel, 80km/h max), this theorem justifies
that 14.4 kHz is indeed enough to assure valid samplings. Such properties are called
``internal consistency of the software requirements specification'' in the CENELEC
standard~@{cite "bsi:50128:2014"}, 7.2.4.22, and are usually addressed in a report of their own.
\<close>
chapter*[ontomodeling::text_section]\<open>The CENELEC 50128 Ontology\<close>
text\<open>
Modeling an ontology from a semi-formal text such as~@{cite "bsi:50128:2014"} is,
like any other modeling activity, not a simple one-to-one translation of some
concepts to some formalism. Rather, implicit and self-understood principles
have to be made explicit, abstractions have to be made, and decisions about
the kind of desirable user-interaction may have an influence, similar to
design decisions being influenced by strengths or weaknesses of a programming language.
\<close>
section*[lhf::text_section]
\<open>Tracking Concepts and Definitions\<close>
text\<open>
\<^isadof> is designed to annotate text elements with structured meta-information and to reference
these text elements throughout the integrated source. A classical application of this capability
is the annotation of concepts and terms definitions---be them informal, semi-formal or formal---and
their consistent referencing. In the context of our CENELEC ontology, \<^eg>, we can translate the
third chapter of @{cite "bsi:50128:2014"} ``Terms, Definitions and Abbreviations'' directly
into our Ontology Definition Language (ODL). Picking one example out of 49, consider the definition
of the concept \<^cenelec_term>\<open>traceability\<close> in paragraphs 3.1.46 (a notion referenced 31 times in
the standard), which we translated directly into:
@{theory_text [display]\<open>
Definition*[traceability, short_name="''traceability''"]
\<open>degree to which a relationship can be established between two or more products of a
development process, especially those having a predecessor/successor or
master/subordinate relationship to one another.\<close>
\<close>}
In the integrated source of the odometry study, we can reference this concept in a text
element as follows:
@{theory_text [display]\<open>
text*[...]\<open> ... to assure <@>{cenelec_term traceability} for
<@>{requirement bitwiseAND}, we prove ... \<close>
\<close>}
\<^isadof> also uses the underlying ontology to generate the navigation markup inside the IDE, \<^ie>
the presentation of this document element inside \<^isadof> is immediately hyperlinked against the
@{theory_text \<open> Definition* \<close>}-element shown above; this serves as documentation of
the standard for the development team working on the integrated source. The PDF presentation
of such links depends on the actual configuration of the document generation; we will explain
this later.
CENELEC also foresees a number of roles, phases, safety integrity levels, etc., which were
directly translated into HOL enumeration types usable in ontological concepts of ODL.
@{theory_text [display]\<open>
datatype role =
PM (* Program Manager *) | RQM (* Requirements Manager *)
| DES (* Designer *) | IMP (* Implementer *)
| VER (* Verifier *) | VAL (* Validator *) | ...
datatype phase =
SYSDEV_ext (* System Development *) | SPl (* Software Planning *)
| SR (* Software Requirement *) | SA (* Software Architecture *)
| SDES (* Software Design *) | ...
\<close>}
Similarly, we can formalize Table A.5 (Verification and Testing) of @{cite "bsi:50128:2014"}:
a classification of \<^emph>\<open>verification and testing techniques\<close>:
@{theory_text [display]\<open>
datatype vnt_technique =
formal_proof "thm list" | stat_analysis
| dyn_analysis dyn_ana_kind | ...
\<close>}
In contrast to the standard, we can parameterize \<open>formal_proof\<close> with a list of
theorems, an entity known to the Isabelle kernel. Here, \<^isadof> assures, for text elements
annotated with theorem names, that they indeed refer to established theorems in the Isabelle
environment. Additional checks could be added to make sure that these theorems have a particular
form.
While we claim that this possibility to link to theorems (and test-results) is unique in the
world of systems attempting to assure \<^cenelec_term>\<open>traceability\<close>, referencing a particular
(proven) theorem is definitively not sufficient to satisfy the claimed requirement. Human
evaluators will always have to check that the provided theorem \<open>adequately\<close> represents the claim;
we do not in the slightest suggest that their work is superfluous. Our framework allows us to
statically check that tests or proofs have been provided, at places where the ontology requires
them to be, and both assessors and developers can rely on this check and navigate through
related information easily. It does not guarantee that intended concepts for, \<^eg>, safety
or security have been adequately modeled.
\<close>
section*[moe::text_section]
\<open>Major Ontological Entities: Requirements and Evidence\<close>
text\<open>
We introduce the central concept of a \<^emph>\<open>requirement\<close> as an ODL \<^theory_text>\<open>doc_class\<close>
based on the generic basic library \<^doc_class>\<open>text_element\<close> providing basic layout attributes.
@{theory_text [display]\<open>
doc_class requirement = text_element +
long_name :: "string option"
is_concerned :: "role set"
\<close>}
The \<open>role\<close>s model the groups of stakeholders in the CENELEC process. Therefore, the \<open>is_concerned\<close>-attribute
allows expressing who ``owns'' this text-element. \<^isadof> supports a role-based
presentation, \<^eg>, different presentation styles of the integrated source may decide to highlight,
to omit, or to defer into an annex text entities according to the role-set.
Since ODL supports single inheritance, we can express sub-requirements and therefore a style
of requirement decomposition as advocated in GSN~@{cite "kelly.ea:goal:2004"}:
@{theory_text [display]\<open>
doc_class sub_requirement =
decomposes :: "requirement"
relates_to :: "requirement set"
\<close>}
\<close>
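text\<open>
For illustration, a hedged usage sketch (the instance declarations are invented, reusing the
\<open>bitwiseAND\<close> requirement name mentioned in the previous section): declaring a requirement
and a sub-requirement decomposing it:
@{theory_text [display]\<open>
text*[bitwiseAND::requirement, long_name="Some ''bitwise AND property''",
      is_concerned="{RQM,VER}"]
\<open>C1 & C2 & C3 = 0 holds for all outputs of the shaft encoder.\<close>
text*[bitwiseAND_decomp::sub_requirement, decomposes="@{docitem \<open>bitwiseAND\<close>}"]
\<open>The property is established separately for each phase of the encoding table.\<close>
\<close>}
\<close>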
section*[claimsreqevidence::text_section]\<open>Tracking Claims, Derived Requirements and Evidence\<close>
text\<open>As an example of making implicit principles explicit,
consider the following statement from @{cite "bsi:50128:2014"}, p. 25: \<^vs>\<open>-0.15cm\<close>
\begin{quote}\small
The objective of software verification is to examine and arrive at a judgment based on
evidence that output items (process, documentation, software or application) of a specific
development phase fulfill the requirements and plans with respect to completeness, correctness
and consistency.
\end{quote} \<^vs>\<open>-0.15cm\<close>
The terms \<^onto_class>\<open>judgement\<close> and \<^term>\<open>evidence\<close> are used as a kind of leitmotif throughout
the CENELEC standard, but they are neither explained nor even listed in the general glossary.
However, the standard is fairly explicit on the \<^emph>\<open>phase\<close>s and the organizational roles that
different stakeholders should have in the process. We express this key concept of
\<^onto_class>\<open>judgement\<close>, \<^eg>, by the following concept:
@{theory_text [display]\<open>
doc_class judgement =
refers_to :: requirement
evidence :: "vnt_technique list"
status :: status
is_concerned :: "role set" <= "{VER,ASR,VAL}"
\<close>}
As one can see, the role set is by default set to the verification team, the assessors, and the
validation team.
There are different views possible here: an alternative would be to define \<^term>\<open>evidence\<close>
as an ontological concept with \<^typ>\<open>vnt_technique\<close>'s (rather than as an attribute of judgement)
and consider as a basis a summary containing the relation between requirements and evidence:
@{theory_text [display]\<open>
doc_class summary =
based_on :: "(requirement \<times> evidence) set"
status :: status
is_concerned :: "role set" <= "{VER,ASR,VAL}"
\<close>}
More experimentation will be needed to find out what kind of ontological modeling is most
adequate for developers in the context of \<^isadof>.
\<close>
section*[ontocontrol::text_section]\<open>Ontological Compliance\<close>
text\<open>From the variety of different possibilities for adding CENELEC annotations to the
integrated source, we will, in the following, point out three scenarios.\<close>
subsection\<open>Internal Verification of Claims in the Requirements Specification.\<close>
text\<open>In our case, the \<^term>\<open>SR\<close>-team detected early on a property necessary
for error-detection of the device (cf. @{technical verific}):
@{theory_text [display]\<open>
text*[encoder_props::requirement]\<open> The requirement specification team identifies the property:
C1 & C2 & C3 = 0 (bitwise logical AND operation)
C1 | C2 | C3 = 1 (bitwise logical OR operation) \<close>
\<close>}
After the Isabelle proofs shown in @{technical verific}, we can either register the theorems
directly in an evidence statement:
@{theory_text [display]\<open>
text*[J1::judgement, refers_to="@{docitem <open>encoder_props<close>}",
evidence="[formal_proof[@{thm <open>Encoder_Property_1<close>},
@{thm <open>Encoder_Property_2<close>}]]"]
\<open>The required encoder properties are in fact verified to be consistent
with the formalization of @{term "phase\<^sub>0"}.\<close>
\<close>}
The references \<open>@{...}\<close>, called antiquotations, not only allow us to reference
formal concepts, they are also checked for consistency, and there are antiquotations that
print the formally checked content (\<^eg>, the statement of a theorem).
\<close>
subsection\<open>Exporting Claims of the Requirements Specification.\<close>
text\<open>By definition, the main purpose of the requirement specification is the identification of
the safety requirements. As an example, we state the required precision of an odometric function:
for any normally behaved distance function \<open>df\<close>, and any representable and valid
sampling sequence that can be constructed for \<open>df\<close>, we require that the difference
between the physical distance and the distance calculable from the @{term Odometric_Position_Count}
is bounded by the minimal resolution of the odometer.
@{theory_text [display]\<open>
text*[R5::safety_requirement]\<open>We can now state ... \<close>
definition Odometric_Position_Count_precise :: "(shaft_encoder_state list\<Rightarrow>output)\<Rightarrow>bool"
where "Odometric_Position_Count_precise odofunction \<equiv>
(\<forall> df. \<forall>S. normally_behaved_distance_function df
\<longrightarrow> representable S
\<longrightarrow> valid_sampling S df
\<longrightarrow> (let pos = uint(Odometric_Position_Count(odofunction S))
in \<bar>df((length S - 1)*\<delta>t\<^sub>o\<^sub>d\<^sub>o) - (\<delta>s\<^sub>r\<^sub>e\<^sub>s * pos)\<bar> \<le> \<delta>s\<^sub>r\<^sub>e\<^sub>s))"
update_instance*[R5::safety_requirement,
formal_definition:="[@{thm \<open>Odometric_Position_Count_precise_def\<close>}]"]
\<close>}
By \<^theory_text>\<open>update_instance*\<close>, we book the property \<open>Odometric_Position_Count_precise_def\<close> as
a \<^onto_class>\<open>safety_requirement\<close>, a specific sub-class of \<^onto_class>\<open>requirement\<close>s
requesting a formal definition in Isabelle.\<close>
subsection\<open>Exporting Derived Requirements.\<close>
text\<open>Finally, we discuss the situation where the verification team discovered a critical side-condition
for a major theorem necessary for the safety requirements; in our development, this was the case for
the condition labelled ``\<open>**\<close>'' in @{docitem verific}. The current CENELEC standard clearly separates
``requirement specifications'' from ``verification reports,'' which is probably motivated
by the overall concern of organizational separation and of document consistency. While this
document organization is possible in \<^isadof>, it is, in our experience, often counter-productive
in practice: organizations tend to defend their documents because the impact of changes becomes
increasingly difficult to assess. This effect results in a dramatic development slow-down and an
increase in costs. Furthermore, these barriers exclude situations where developers know, for example,
invariants perfectly well, but cannot communicate them to the verification team because the precise
formalization is not known in time. Rather than advocating document separation, we tend to integrate
these documents, keep proofs as close as possible to definitions, and plead for rigorous version
control of the integrated source, together with the proposed methods to strengthen the links between
the informal and formal parts by antiquotations and continuous ontological checking. Instead of
separating the documents, we would rather emphasize the \<^emph>\<open>separation of the views\<close> of the
different document representations. Such views can be systematically generated from the integrated
source as different PDF versions, and for each version, document-specific consistency guarantees can
be automatically enforced.
In our case study, we define this condition as a predicate, declare an explanation of it as
\<^onto_class>\<open>SRAC\<close> (CENELEC for: safety-related application condition; ontologically, a class
derived from \<^onto_class>\<open>requirement\<close>), and add the definition of the predicate to the
document instance as described in the previous section.\<close>
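text\<open>Schematically, this amounts to the following pattern (a minimal sketch; the
predicate and instance names are hypothetical, and we assume that \<^onto_class>\<open>SRAC\<close>
provides an attribute \<open>formal_definition\<close> analogous to \<^onto_class>\<open>safety_requirement\<close>):
@{theory_text [display]\<open>
definition side_condition :: "bool" where "side_condition \<equiv> ..."
text*[srac1::SRAC]\<open>The side-condition \<open>**\<close> of the main theorem must hold
                      in every operational context of the device.\<close>
update_instance*[srac1::SRAC,
                 formal_definition:="[@{thm \<open>side_condition_def\<close>}]"]
\<close>}\<close>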
chapter\<open>Appendix\<close>
text\<open>
\<^item> \<open>@{thm refl}\<close> : @{thm refl}
\<^item> \<open>@{thm [source] refl}\<close> : @{thm [source] refl}
\<^item> \<open>@{thm[mode=Rule] conjI}\<close> : @{thm[mode=Rule] conjI}
\<^item> \<open>@{file "mini_odo.thy"}\<close> : @{file "mini_odo.thy"}
\<^item> \<open>@{value "3+4::int"}\<close> : @{value "3+4::int"}
\<^item> \<open>@{const hd}\<close> : @{const hd}
\<^item> \<open>@{theory HOL.List}\<close> : @{theory HOL.List}
\<^item> \<open>@{term "3"}\<close> : @{term "3"}
\<^item> \<open>@{type bool}\<close> : @{type bool}
\<^item> \<open>@{term [show_types] "f x = a + x"}\<close> : @{term [show_types] "f x = a + x"}
\<close>
text\<open>Examples for the declaration of the typed doc-classes "assumption" (sic!) and "hypothesis" (sic!!),
concepts defined in the underlying ontology @{theory "Isabelle_DOF-Ontologies.CENELEC_50128"}. \<close>
text*[ass2::assumption, long_name="Some ''assumption one''"] \<open> The subsystem Y is safe. \<close>
text*[hyp1::hypothesis] \<open> \<open>P \<noteq> NP\<close> \<close>
text\<open>
A real example fragment from a larger project, declaring a text-element as a
"safety-related application condition", a concept defined in the
@{theory "Isabelle_DOF-Ontologies.CENELEC_50128"} ontology:\<close>
text*[hyp2::hypothesis]\<open>Under the assumption @{assumption \<open>ass2\<close>} we establish the following: ... \<close>
text*[ass122::SRAC, long_name="Some ''ass122''"]
\<open> The overall sampling frequency of the odometer subsystem is therefore 14 kHz,
which includes sampling, computing and result communication times... \<close>
text*[ass123::SRAC]
\<open> The overall sampling frequency of the odometer subsystem is therefore 14 kHz,
which includes sampling, computing and result communication times... \<close>
text*[ass124::EC, long_name="Some ''ass124''"]
\<open> The overall sampling frequency of the odometer subsystem is therefore 14 kHz,
which includes sampling, computing and result communication times... \<close>
text*[t10::test_result]
\<open> This is a meta-test. This could be an ML-command that governs the external
test-execution via, \<^eg>, a makefile or specific calls to a test-environment or test-engine. \<close>
text \<open> Finally, some examples of references to doc-items, i.e., text-elements
with declared meta-information and status. \<close>
text \<open> As established by @{test_result \<open>t10\<close>}\<close>
text \<open> the @{test_result \<open>t10\<close>}
as well as the @{SRAC \<open>ass122\<close>}\<close>
text \<open> represent a justification of the safety related applicability
condition @{SRAC \<open>ass122\<close>} aka exported constraint @{EC \<open>ass122\<close>}.\<close>
text \<open> Due to notational conventions for antiquotations, one may even write:
        "represent a justification of the safety related applicability
         condition \<^SRAC>\<open>ass122\<close> aka exported constraint \<^EC>\<open>ass122\<close>."\<close>
(*<*)
end
(*>*)

View File

@@ -1,12 +0,0 @@
# Examples
## [Scholarly (Academic) Papers](./scholarly_paper/)
The examples in the directory [`scholarly_paper`](./scholarly_paper/)
are examples of typical conference papers (usually in computer science).
## [Technical Reports](./technical_report/)
The examples in the directory [`technical_report`](./technical_report/)
are examples of typical technical reports. This also includes the
[Isabelle/DOF User and Implementation Manual](./technical_report/IsaDof_Manual).

View File

@@ -1,5 +0,0 @@
technical_report
CENELEC_50128
cytology
CC_ISO15408
beamerx

View File

@@ -1,2 +0,0 @@
poster
presentation

View File

@@ -1,8 +0,0 @@
chapter AFP
session "poster-example" (AFP) = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof, timeout = 300]
theories
"poster"
document_files
"preamble.tex"

View File

@@ -1,2 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,39 +0,0 @@
(*<*)
theory "poster"
imports "Isabelle_DOF.scholarly_paper"
"Isabelle_DOF-Ontologies.document_templates"
begin
use_template "beamerposter-UNSUPPORTED"
use_ontology "scholarly_paper"
(*>*)
title*[tit::title]\<open>Example Presentation\<close>
author*[safouan,email="\<open>example@example.org\<close>",affiliation="\<open>Example Org\<close>"]\<open>Eliza Example\<close>
text\<open>
\vfill
\begin{block}{\large Fontsizes}
\centering
{\tiny tiny}\par
{\scriptsize scriptsize}\par
{\footnotesize footnotesize}\par
{\normalsize normalsize}\par
{\large large}\par
{\Large Large}\par
{\LARGE LARGE}\par
{\veryHuge veryHuge}\par
{\VeryHuge VeryHuge}\par
{\VERYHuge VERYHuge}\par
\end{block}
\vfill
\<close>
text\<open>
@{block (title = "\<open>Title\<^sub>t\<^sub>e\<^sub>s\<^sub>t\<close>") "\<open>Block content\<^sub>t\<^sub>e\<^sub>s\<^sub>t\<close>"}
\<close>
(*<*)
end
(*>*)

View File

@@ -1,9 +0,0 @@
chapter AFP
session "presentation-example" (AFP) = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof, timeout = 300]
theories
"presentation"
document_files
"preamble.tex"
"figures/A.png"

Binary file not shown.


View File

@@ -1,2 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,69 +0,0 @@
(*<*)
theory "presentation"
imports "Isabelle_DOF.scholarly_paper"
"Isabelle_DOF-Ontologies.document_templates"
begin
use_template "beamer-UNSUPPORTED"
use_ontology "scholarly_paper"
(*>*)
title*[tit::title]\<open>Example Presentation\<close>
author*[safouan,email="\<open>example@example.org\<close>",affiliation="\<open>Example Org\<close>"]\<open>Eliza Example\<close>
text\<open>
\begin{frame}
\frametitle{Example Slide}
\centering\huge This is an example!
\end{frame}
\<close>
frame*[test_frame
, frametitle = \<open>\<open>\<open>Example Slide\<^sub>t\<^sub>e\<^sub>s\<^sub>t\<close> with items @{thm "HOL.refl"}\<close>\<close>
, framesubtitle = "''Subtitle''"]
\<open>This is an example!
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> and the term encoding the title of this frame is \<^term_>\<open>frametitle @{frame \<open>test_frame\<close>}\<close>\<close>
frame*[test_frame2
, frametitle = "''Example Slide''"
, framesubtitle = \<open>\<open>\<open>Subtitle\<^sub>t\<^sub>e\<^sub>s\<^sub>t:\<close> the value of \<^term>\<open>(3::int) + 3\<close> is @{value "(3::int) + 3"}\<close>\<close>]
\<open>Test frame env \<^term>\<open>refl\<close>\<close>
frame*[test_frame3, frametitle = "''A slide with a Figure''"]
\<open>A figure
@{figure_content (width=45, caption=\<open>\<open>Figure\<^sub>t\<^sub>e\<^sub>s\<^sub>t\<close> is not the \<^term>\<open>refl\<close> theorem (@{thm "refl"}).\<close>)
"figures/A.png"}\<close>
frame*[test_frame4
, options = "''allowframebreaks''"
, frametitle = "''Example Slide with frame break''"
, framesubtitle = \<open>\<open>\<open>Subtitle\<^sub>t\<^sub>e\<^sub>s\<^sub>t:\<close> the value of \<^term>\<open>(3::int) + 3\<close> is @{value "(3::int) + 3"}\<close>\<close>]
\<open>
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> and the term encoding the title of this frame is \<^term_>\<open>frametitle @{frame \<open>test_frame4\<close>}\<close>
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<close>
(*<*)
end
(*>*)

View File

@@ -1,87 +0,0 @@
theory Cytology
imports "Isabelle_DOF.scholarly_paper"
begin
text\<open>A small example ontology for demonstration purposes.
The presentation closely follows \<^url>\<open>https://www.youtube.com/watch?v=URUJD5NEXC8\<close>.\<close>
datatype protein = filaments | motor_proteins | rna | dna | nucleolus
type_synonym desc = "string"
onto_class organelles = description :: desc
find_theorems (60) name:"organelles"
term "Cytology.organelles.make"
onto_class ribosomes = organelles + description :: desc
onto_class mytochondria = organelles + description :: desc
onto_class golgi_apparatus = organelles + description :: desc
onto_class lysosome = organelles + description :: desc
text\<open>the control center of the cell:\<close>
onto_class nucleus = organelles +
description :: desc
components :: "protein list" <= "[nucleolus]"
(* Not so nice construction to mimic inheritance on types used in attribute positions. *)
datatype organelles' = upcast\<^sub>r\<^sub>i\<^sub>b\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e\<^sub>s (get_ribosomes:ribosomes)
| upcast\<^sub>m\<^sub>y\<^sub>t\<^sub>o\<^sub>c\<^sub>h\<^sub>o\<^sub>n\<^sub>d\<^sub>r\<^sub>i\<^sub>a (get_mytochondria:mytochondria)
| upcast\<^sub>g\<^sub>o\<^sub>l\<^sub>g\<^sub>i\<^sub>_\<^sub>a\<^sub>p\<^sub>p\<^sub>a\<^sub>r\<^sub>a\<^sub>t\<^sub>u\<^sub>s (get_golgi_apparatus: golgi_apparatus)
| upcast\<^sub>l\<^sub>y\<^sub>s\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e (get_lysosome : lysosome)
| upcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s (get_nucleus : nucleus)
fun is\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s where "is\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s (upcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s X) = True" | "is\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s ( _) = False"
(* ... *)
fun downcast\<^sub>r\<^sub>i\<^sub>b\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e\<^sub>s
where "downcast\<^sub>r\<^sub>i\<^sub>b\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e\<^sub>s (upcast\<^sub>r\<^sub>i\<^sub>b\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e\<^sub>s X) = X" | "downcast\<^sub>r\<^sub>i\<^sub>b\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e\<^sub>s _ = undefined"
fun downcast\<^sub>m\<^sub>y\<^sub>t\<^sub>o\<^sub>c\<^sub>h\<^sub>o\<^sub>n\<^sub>d\<^sub>r\<^sub>i\<^sub>a
where "downcast\<^sub>m\<^sub>y\<^sub>t\<^sub>o\<^sub>c\<^sub>h\<^sub>o\<^sub>n\<^sub>d\<^sub>r\<^sub>i\<^sub>a (upcast\<^sub>m\<^sub>y\<^sub>t\<^sub>o\<^sub>c\<^sub>h\<^sub>o\<^sub>n\<^sub>d\<^sub>r\<^sub>i\<^sub>a X) = X" | "downcast\<^sub>m\<^sub>y\<^sub>t\<^sub>o\<^sub>c\<^sub>h\<^sub>o\<^sub>n\<^sub>d\<^sub>r\<^sub>i\<^sub>a _ = undefined"
fun downcast\<^sub>g\<^sub>o\<^sub>l\<^sub>g\<^sub>i\<^sub>_\<^sub>a\<^sub>p\<^sub>p\<^sub>a\<^sub>r\<^sub>a\<^sub>t\<^sub>u\<^sub>s
where "downcast\<^sub>g\<^sub>o\<^sub>l\<^sub>g\<^sub>i\<^sub>_\<^sub>a\<^sub>p\<^sub>p\<^sub>a\<^sub>r\<^sub>a\<^sub>t\<^sub>u\<^sub>s (upcast\<^sub>g\<^sub>o\<^sub>l\<^sub>g\<^sub>i\<^sub>_\<^sub>a\<^sub>p\<^sub>p\<^sub>a\<^sub>r\<^sub>a\<^sub>t\<^sub>u\<^sub>s X) = X" | "downcast\<^sub>g\<^sub>o\<^sub>l\<^sub>g\<^sub>i\<^sub>_\<^sub>a\<^sub>p\<^sub>p\<^sub>a\<^sub>r\<^sub>a\<^sub>t\<^sub>u\<^sub>s _ = undefined"
fun downcast\<^sub>l\<^sub>y\<^sub>s\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e
where "downcast\<^sub>l\<^sub>y\<^sub>s\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e (upcast\<^sub>l\<^sub>y\<^sub>s\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e X) = X" | "downcast\<^sub>l\<^sub>y\<^sub>s\<^sub>o\<^sub>s\<^sub>o\<^sub>m\<^sub>e _ = undefined"
fun downcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s
where "downcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s (upcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s X) = X" | "downcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s _ = undefined"
onto_class cell =
name :: string
membrane :: desc <= "\<open>The outer boundary of the cell\<close>"
cytoplasm :: desc <= "\<open>The liquid in the cell\<close>"
cytoskeleton :: desc <= "\<open>includes the thread-like microfilaments\<close>"
genetic_material :: "protein list" <= "[rna, dna]"
text\<open>Cells are divided into two categories: \<^emph>\<open>procaryotic\<close> cells (unicellular organisms, such as
some bacteria) without a substructuring into organelles, and \<^emph>\<open>eucaryotic\<close> cells, as occurring in
multicellular organisms.\<close>
onto_class procaryotic_cells = cell +
name :: string
onto_class eucaryotic_cells = cell +
organelles :: "organelles' list"
invariant has_nucleus :: "\<exists> org \<in> set (organelles \<sigma>). is\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s org"
\<comment> \<open>Cells must have at least one nucleus. However, this should be executable.\<close>
find_theorems (70)name:"eucaryotic_cells"
find_theorems name:has_nucleus
value "is\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s (mk\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s X)"
term \<open>eucaryotic_cells.organelles\<close>
value \<open>(eucaryotic_cells.organelles(eucaryotic_cells.make X Y Z Z Z [] []))\<close>
value \<open>has_nucleus_inv(eucaryotic_cells.make X Y Z Z Z [] [])\<close>
value \<open>has_nucleus_inv(eucaryotic_cells.make X Y Z Z Z [] [upcast\<^sub>n\<^sub>u\<^sub>c\<^sub>l\<^sub>e\<^sub>u\<^sub>s (nucleus.make a b c )])\<close>
end

View File

@@ -1,4 +0,0 @@
session "Cytology" = "Isabelle_DOF" +
options [document = false]
theories
"Cytology"

View File

@@ -1 +0,0 @@
TR_my_commented_isabelle

View File

@@ -1,17 +0,0 @@
session "TR_MyCommentedIsabelle" = "Isabelle_DOF" +
options [document = pdf, document_output = "output", document_build = dof]
theories
"TR_MyCommentedIsabelle"
document_files
"root.bib"
"preamble.tex"
"prooftree.sty"
"figures/markup-demo.png"
"figures/text-element.pdf"
"figures/isabelle-architecture.pdf"
"figures/pure-inferences-I.pdf"
"figures/pure-inferences-II.pdf"
"figures/document-model.pdf"
"figures/MyCommentedIsabelle.png"

View File

@@ -1,20 +0,0 @@
%% Copyright (C) 2018 The University of Sheffield
%% 2018 The University of Paris-Saclay
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1.3c of the License, or (at your option) any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
%% This is a placeholder for user-specific configuration and packages.
\renewcommand{\isasymtheta}{\texorpdfstring{\isamath{\vartheta}}{ϑ}}
\usepackage{prooftree}
\title{<TITLE>}
\author{<AUTHOR>}

View File

@@ -1,347 +0,0 @@
\message{<Paul Taylor's Proof Trees, 2 August 1996>}
%% Build proof tree for Natural Deduction, Sequent Calculus, etc.
%% WITH SHORTENING OF PROOF RULES!
%% Paul Taylor, begun 10 Oct 1989
%% *** THIS IS ONLY A PRELIMINARY VERSION AND THINGS MAY CHANGE! ***
%%
%% 2 Aug 1996: fixed \mscount and \proofdotnumber
%%
%% \prooftree
%% hyp1 produces:
%% hyp2
%% hyp3 hyp1 hyp2 hyp3
%% \justifies -------------------- rulename
%% concl concl
%% \thickness=0.08em
%% \shiftright 2em
%% \using
%% rulename
%% \endprooftree
%%
%% where the hypotheses may be similar structures or just formulae.
%%
%% To get a vertical string of dots instead of the proof rule, do
%%
%% \prooftree which produces:
%% [hyp]
%% \using [hyp]
%% name .
%% \proofdotseparation=1.2ex .name
%% \proofdotnumber=4 .
%% \leadsto .
%% concl concl
%% \endprooftree
%%
%% Within a prooftree, \[ and \] may be used instead of \prooftree and
%% \endprooftree; this is not permitted at the outer level because it
%% conflicts with LaTeX. Also,
%% \Justifies
%% produces a double line. In LaTeX you can use \begin{prooftree} and
%% \end{prooftree} at the outer level (however this will not work for the inner
%% levels, but in any case why would you want to be so verbose?).
%%
%% All of the keywords except \prooftree and \endprooftree are optional
%% and may appear in any order. They may also be combined in \newcommand's
%% eg "\def\Cut{\using\sf cut\thickness.08em\justifies}" with the abbreviation
%% "\prooftree hyp1 hyp2 \Cut \concl \endprooftree". This is recommended and
%% some standard abbreviations will be found at the end of this file.
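%%
%% For example, a single implication-elimination step could be written as
%% follows (a sketch using the \impelim abbreviation defined at the end of
%% this file):
%%
%% \prooftree
%% A \quad A\Rightarrow B
%% \impelim
%% B
%% \endprooftree
%%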
%%
%% \thickness specifies the breadth of the rule in any units, although
%% font-relative units such as "ex" or "em" are preferable.
%% It may optionally be followed by "=".
%% \proofrulebreadth=.08em or \setlength\proofrulebreadth{.08em} may also be
%% used either in place of \thickness or globally; the default is 0.04em.
%% \proofdotseparation and \proofdotnumber control the size of the
%% string of dots
%%
%% If proof trees and formulae are mixed, some explicit spacing is needed,
%% but don't put anything to the left of the left-most (or the right of
%% the right-most) hypothesis, or put it in braces, because this will cause
%% the indentation to be lost.
%%
%% By default the conclusion is centered wrt the left-most and right-most
%% immediate hypotheses (not their proofs); \shiftright or \shiftleft moves
%% it relative to this position. (Not sure about this specification or how
%% it should affect spreading of proof tree.)
%
% global assignments to dimensions seem to have the effect of stretching
% diagrams horizontally.
%
%%==========================================================================
\def\introrule{{\cal I}}\def\elimrule{{\cal E}}%%
\def\andintro{\using{\land}\introrule\justifies}%%
\def\impelim{\using{\Rightarrow}\elimrule\justifies}%%
\def\allintro{\using{\forall}\introrule\justifies}%%
\def\allelim{\using{\forall}\elimrule\justifies}%%
\def\falseelim{\using{\bot}\elimrule\justifies}%%
\def\existsintro{\using{\exists}\introrule\justifies}%%
%% #1 is meant to be 1 or 2 for the first or second formula
\def\andelim#1{\using{\land}#1\elimrule\justifies}%%
\def\orintro#1{\using{\lor}#1\introrule\justifies}%%
%% #1 is meant to be a label corresponding to the discharged hypothesis/es
\def\impintro#1{\using{\Rightarrow}\introrule_{#1}\justifies}%%
\def\orelim#1{\using{\lor}\elimrule_{#1}\justifies}%%
\def\existselim#1{\using{\exists}\elimrule_{#1}\justifies}
%%==========================================================================
\newdimen\proofrulebreadth \proofrulebreadth=.05em
\newdimen\proofdotseparation \proofdotseparation=1.25ex
\newdimen\proofrulebaseline \proofrulebaseline=2ex
\newcount\proofdotnumber \proofdotnumber=3
\let\then\relax
\def\hfi{\hskip0pt plus.0001fil}
\mathchardef\squigto="3A3B
%
% flag where we are
\newif\ifinsideprooftree\insideprooftreefalse
\newif\ifonleftofproofrule\onleftofproofrulefalse
\newif\ifproofdots\proofdotsfalse
\newif\ifdoubleproof\doubleprooffalse
\let\wereinproofbit\relax
%
% dimensions and boxes of bits
\newdimen\shortenproofleft
\newdimen\shortenproofright
\newdimen\proofbelowshift
\newbox\proofabove
\newbox\proofbelow
\newbox\proofrulename
%
% miscellaneous commands for setting values
\def\shiftproofbelow{\let\next\relax\afterassignment\setshiftproofbelow\dimen0 }
\def\shiftproofbelowneg{\def\next{\multiply\dimen0 by-1 }%
\afterassignment\setshiftproofbelow\dimen0 }
\def\setshiftproofbelow{\next\proofbelowshift=\dimen0 }
\def\setproofrulebreadth{\proofrulebreadth}
%=============================================================================
\def\prooftree{% NESTED ZERO (\ifonleftofproofrule)
%
% first find out whether we're at the left-hand end of a proof rule
\ifnum \lastpenalty=1
\then \unpenalty
\else \onleftofproofrulefalse
\fi
%
% some space on left (except if we're on left, and no infinity for outermost)
\ifonleftofproofrule
\else \ifinsideprooftree
\then \hskip.5em plus1fil
\fi
\fi
%
% begin our proof tree environment
\bgroup% NESTED ONE (\proofbelow, \proofrulename, \proofabove,
% \shortenproofleft, \shortenproofright, \proofrulebreadth)
\setbox\proofbelow=\hbox{}\setbox\proofrulename=\hbox{}%
\let\justifies\proofover\let\leadsto\proofoverdots\let\Justifies\proofoverdbl
\let\using\proofusing\let\[\prooftree
\ifinsideprooftree\let\]\endprooftree\fi
\proofdotsfalse\doubleprooffalse
\let\thickness\setproofrulebreadth
\let\shiftright\shiftproofbelow \let\shift\shiftproofbelow
\let\shiftleft\shiftproofbelowneg
\let\ifwasinsideprooftree\ifinsideprooftree
\insideprooftreetrue
%
% now begin to set the top of the rule (definitions local to it)
\setbox\proofabove=\hbox\bgroup$\displaystyle % NESTED TWO
\let\wereinproofbit\prooftree
%
% these local variables will be copied out:
\shortenproofleft=0pt \shortenproofright=0pt \proofbelowshift=0pt
%
% flags to enable inner proof tree to detect if on left:
\onleftofproofruletrue\penalty1
}
%=============================================================================
% end whatever box and copy crucial values out of it
\def\eproofbit{% NESTED TWO
%
% various hacks applicable to hypothesis list
\ifx \wereinproofbit\prooftree
\then \ifcase \lastpenalty
\then \shortenproofright=0pt % 0: some other object, no indentation
\or \unpenalty\hfil % 1: empty hypotheses, just glue
\or \unpenalty\unskip % 2: just had a tree, remove glue
\else \shortenproofright=0pt % eh?
\fi
\fi
%
% pass out crucial values from scope
\global\dimen0=\shortenproofleft
\global\dimen1=\shortenproofright
\global\dimen2=\proofrulebreadth
\global\dimen3=\proofbelowshift
\global\dimen4=\proofdotseparation
\global\count255=\proofdotnumber
%
% end the box
$\egroup % NESTED ONE
%
% restore the values
\shortenproofleft=\dimen0
\shortenproofright=\dimen1
\proofrulebreadth=\dimen2
\proofbelowshift=\dimen3
\proofdotseparation=\dimen4
\proofdotnumber=\count255
}
%=============================================================================
\def\proofover{% NESTED TWO
\eproofbit % NESTED ONE
\setbox\proofbelow=\hbox\bgroup % NESTED TWO
\let\wereinproofbit\proofover
$\displaystyle
}%
%
%=============================================================================
\def\proofoverdbl{% NESTED TWO
\eproofbit % NESTED ONE
\doubleprooftrue
\setbox\proofbelow=\hbox\bgroup % NESTED TWO
\let\wereinproofbit\proofoverdbl
$\displaystyle
}%
%
%=============================================================================
\def\proofoverdots{% NESTED TWO
\eproofbit % NESTED ONE
\proofdotstrue
\setbox\proofbelow=\hbox\bgroup % NESTED TWO
\let\wereinproofbit\proofoverdots
$\displaystyle
}%
%
%=============================================================================
\def\proofusing{% NESTED TWO
\eproofbit % NESTED ONE
\setbox\proofrulename=\hbox\bgroup % NESTED TWO
\let\wereinproofbit\proofusing
\kern0.3em$
}
%=============================================================================
\def\endprooftree{% NESTED TWO
\eproofbit % NESTED ONE
% \dimen0 = length of proof rule
% \dimen1 = indentation of conclusion wrt rule
% \dimen2 = new \shortenproofleft, ie indentation of conclusion
% \dimen3 = new \shortenproofright, ie
% space on right of conclusion to end of tree
% \dimen4 = space on right of conclusion below rule
\dimen5 =0pt% spread of hypotheses
% \dimen6, \dimen7 = height & depth of rule
%
% length of rule needed by proof above
\dimen0=\wd\proofabove \advance\dimen0-\shortenproofleft
\advance\dimen0-\shortenproofright
%
% amount of spare space below
\dimen1=.5\dimen0 \advance\dimen1-.5\wd\proofbelow
\dimen4=\dimen1
\advance\dimen1\proofbelowshift \advance\dimen4-\proofbelowshift
%
% conclusion sticks out to left of immediate hypotheses
\ifdim \dimen1<0pt
\then \advance\shortenproofleft\dimen1
\advance\dimen0-\dimen1
\dimen1=0pt
% now it sticks out to left of tree!
\ifdim \shortenproofleft<0pt
\then \setbox\proofabove=\hbox{%
\kern-\shortenproofleft\unhbox\proofabove}%
\shortenproofleft=0pt
\fi
\fi
%
% and to the right
\ifdim \dimen4<0pt
\then \advance\shortenproofright\dimen4
\advance\dimen0-\dimen4
\dimen4=0pt
\fi
%
% make sure enough space for label
\ifdim \shortenproofright<\wd\proofrulename
\then \shortenproofright=\wd\proofrulename
\fi
%
% calculate new indentations
\dimen2=\shortenproofleft \advance\dimen2 by\dimen1
\dimen3=\shortenproofright\advance\dimen3 by\dimen4
%
% make the rule or dots, with name attached
\ifproofdots
\then
\dimen6=\shortenproofleft \advance\dimen6 .5\dimen0
\setbox1=\vbox to\proofdotseparation{\vss\hbox{$\cdot$}\vss}%
\setbox0=\hbox{%
\advance\dimen6-.5\wd1
\kern\dimen6
$\vcenter to\proofdotnumber\proofdotseparation
{\leaders\box1\vfill}$%
\unhbox\proofrulename}%
\else \dimen6=\fontdimen22\the\textfont2 % height of maths axis
\dimen7=\dimen6
\advance\dimen6by.5\proofrulebreadth
\advance\dimen7by-.5\proofrulebreadth
\setbox0=\hbox{%
\kern\shortenproofleft
\ifdoubleproof
\then \hbox to\dimen0{%
$\mathsurround0pt\mathord=\mkern-6mu%
\cleaders\hbox{$\mkern-2mu=\mkern-2mu$}\hfill
\mkern-6mu\mathord=$}%
\else \vrule height\dimen6 depth-\dimen7 width\dimen0
\fi
\unhbox\proofrulename}%
\ht0=\dimen6 \dp0=-\dimen7
\fi
%
% set up to centre outermost tree only
\let\doll\relax
\ifwasinsideprooftree
\then \let\VBOX\vbox
\else \ifmmode\else$\let\doll=$\fi
\let\VBOX\vcenter
\fi
% this \vbox or \vcenter is the actual output:
\VBOX {\baselineskip\proofrulebaseline \lineskip.2ex
\expandafter\lineskiplimit\ifproofdots0ex\else-0.6ex\fi
\hbox spread\dimen5 {\hfi\unhbox\proofabove\hfi}%
\hbox{\box0}%
\hbox {\kern\dimen2 \box\proofbelow}}\doll%
%
% pass new indentations out of scope
\global\dimen2=\dimen2
\global\dimen3=\dimen3
\egroup % NESTED ZERO
\ifonleftofproofrule
\then \shortenproofleft=\dimen2
\fi
\shortenproofright=\dimen3
%
% some space on right and flag we've just made a tree
\onleftofproofrulefalse
\ifinsideprooftree
\then \hskip.5em plus 1fil \penalty2
\fi
}
%==========================================================================
% IDEAS
% 1. Specification of \shiftright and how to spread trees.
% 2. Spacing command \m which causes 1em+1fil spacing, over-riding
% existing space on sides of trees and not affecting the
% detection of being on the left or right.
% 3. Hack using \@currenvir to detect LaTeX environment; have to
% use \aftergroup to pass \shortenproofleft/right out.
% 4. (Pie in the sky) detect how much trees can be "tucked in"
% 5. Discharged hypotheses (diagonal lines).

View File

@@ -1,528 +0,0 @@
@misc{bockenek:hal-02069705,
TITLE = {{Using Isabelle/UTP for the Verification of Sorting Algorithms A Case Study}},
AUTHOR = {Bockenek, Joshua A and Lammich, Peter and Nemouchi, Yakoub and Wolff, Burkhart},
URL = {https://easychair.org/publications/preprint/CxRV},
NOTE = {Isabelle Workshop 2018, Colocated with Interactive Theorem Proving. As part of FLOC 2018, Oxford, GB.},
YEAR = {2018},
  MONTH = jul
}
@book{DBLP:books/sp/NipkowPW02,
author = {Tobias Nipkow and
Lawrence C. Paulson and
Markus Wenzel},
title = {Isabelle/HOL - {A} Proof Assistant for Higher-Order Logic},
series = {Lecture Notes in Computer Science},
volume = {2283},
publisher = {Springer},
year = {2002},
url = {https://doi.org/10.1007/3-540-45949-9},
deactivated_doi = {10.1007/3-540-45949-9},
isbn = {3-540-43376-7},
timestamp = {Tue, 14 May 2019 10:00:35 +0200},
biburl = {https://dblp.org/rec/bib/books/sp/NipkowPW02},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/sosp/KleinEHACDEEKNSTW09,
author = {Gerwin Klein and
Kevin Elphinstone and
Gernot Heiser and
June Andronick and
David Cock and
Philip Derrin and
Dhammika Elkaduwe and
Kai Engelhardt and
Rafal Kolanski and
Michael Norrish and
Thomas Sewell and
Harvey Tuch and
Simon Winwood},
title = {seL4: formal verification of an {OS} kernel},
deactivated_booktitle = {Proceedings of the 22nd {ACM} Symposium on Operating Systems Principles
2009, {SOSP} 2009, Big Sky, Montana, USA, October 11-14, 2009},
pages = {207--220},
year = {2009},
crossref = {DBLP:conf/sosp/2009},
url = {https://doi.org/10.1145/1629575.1629596},
deactivated_doi = {10.1145/1629575.1629596},
timestamp = {Tue, 06 Nov 2018 16:59:32 +0100},
biburl = {https://dblp.org/rec/bib/conf/sosp/KleinEHACDEEKNSTW09},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/sosp/2009,
editor = {Jeanna Neefe Matthews and
Thomas E. Anderson},
title = {Proceedings of the 22nd {ACM} Symposium on Operating Systems Principles
2009, {SOSP} 2009, Big Sky, Montana, USA, October 11-14, 2009},
publisher = {{ACM}},
year = {2009},
url = {https://doi.org/10.1145/1629575},
deactivated_doi = {10.1145/1629575},
isbn = {978-1-60558-752-3},
timestamp = {Tue, 06 Nov 2018 16:59:32 +0100},
biburl = {https://dblp.org/rec/bib/conf/sosp/2009},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/tphol/CohenDHLMSST09,
author = {Ernie Cohen and
Markus Dahlweid and
Mark A. Hillebrand and
Dirk Leinenbach and
Michal Moskal and
Thomas Santen and
Wolfram Schulte and
Stephan Tobies},
title = {{VCC:} {A} Practical System for Verifying Concurrent {C}},
deactivated_booktitle = {Theorem Proving in Higher Order Logics, 22nd International Conference,
TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings},
pages = {23--42},
year = {2009},
url = {https://doi.org/10.1007/978-3-642-03359-9_2},
deactivated_doi = {10.1007/978-3-642-03359-9_2},
timestamp = {Tue, 23 May 2017 01:12:08 +0200},
biburl = {https://dblp.org/rec/bib/conf/tphol/CohenDHLMSST09},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/cacm/Leroy09,
author = {Xavier Leroy},
title = {Formal verification of a realistic compiler},
journal = {Commun. {ACM}},
volume = {52},
number = {7},
pages = {107--115},
year = {2009},
url = {http://doi.acm.org/10.1145/1538788.1538814},
deactivated_doi = {10.1145/1538788.1538814},
timestamp = {Thu, 02 Jul 2009 13:36:32 +0200},
biburl = {https://dblp.org/rec/bib/journals/cacm/Leroy09},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/itp/Wenzel14,
author = {Makarius Wenzel},
title = {Asynchronous User Interaction and Tool Integration in Isabelle/PIDE},
deactivated_booktitle = {Interactive Theorem Proving - 5th International Conference, {ITP}
2014, Held as Part of the Vienna Summer of Logic, {VSL} 2014, Vienna,
Austria, July 14-17, 2014. Proceedings},
pages = {515--530},
year = {2014},
crossref = {DBLP:conf/itp/2014},
url = {https://doi.org/10.1007/978-3-319-08970-6\_33},
deactivated_doi = {10.1007/978-3-319-08970-6\_33},
timestamp = {Tue, 14 May 2019 10:00:37 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/itp/2014,
editor = {Gerwin Klein and
Ruben Gamboa},
title = {Interactive Theorem Proving - 5th International Conference, {ITP}
2014, Held as Part of the Vienna Summer of Logic, {VSL} 2014, Vienna,
Austria, July 14-17, 2014. Proceedings},
series = {Lecture Notes in Computer Science},
volume = {8558},
publisher = {Springer},
year = {2014},
url = {https://doi.org/10.1007/978-3-319-08970-6},
deactivated_doi = {10.1007/978-3-319-08970-6},
isbn = {978-3-319-08969-0},
timestamp = {Tue, 14 May 2019 10:00:37 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/2014},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:journals/corr/Wenzel14,
author = {Makarius Wenzel},
title = {System description: Isabelle/jEdit in 2014},
deactivated_booktitle = {Proceedings Eleventh Workshop on User Interfaces for Theorem Provers,
{UITP} 2014, Vienna, Austria, 17th July 2014.},
pages = {84--94},
year = {2014},
crossref = {DBLP:journals/corr/BenzmullerP14},
url = {https://doi.org/10.4204/EPTCS.167.10},
deactivated_doi = {10.4204/EPTCS.167.10},
timestamp = {Wed, 12 Sep 2018 01:05:15 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/Wenzel14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:journals/corr/BenzmullerP14,
editor = {Christoph Benzm{\"{u}}ller and
Bruno {Woltzenlogel Paleo}},
title = {Proceedings Eleventh Workshop on User Interfaces for Theorem Provers,
{UITP} 2014, Vienna, Austria, 17th July 2014},
series = {{EPTCS}},
volume = {167},
year = {2014},
url = {https://doi.org/10.4204/EPTCS.167},
deactivated_doi = {10.4204/EPTCS.167},
timestamp = {Wed, 12 Sep 2018 01:05:15 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/BenzmullerP14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/mkm/BarrasGHRTWW13,
author = {Bruno Barras and
Lourdes Del Carmen Gonz{\'{a}}lez{-}Huesca and
Hugo Herbelin and
Yann R{\'{e}}gis{-}Gianas and
Enrico Tassi and
Makarius Wenzel and
Burkhart Wolff},
title = {Pervasive Parallelism in Highly-Trustable Interactive Theorem Proving
Systems},
deactivated_booktitle = {Intelligent Computer Mathematics - MKM, Calculemus, DML, and Systems
and Projects 2013, Held as Part of {CICM} 2013, Bath, UK, July 8-12,
2013. Proceedings},
pages = {359--363},
year = {2013},
crossref = {DBLP:conf/mkm/2013},
url = {https://doi.org/10.1007/978-3-642-39320-4\_29},
deactivated_doi = {10.1007/978-3-642-39320-4\_29},
timestamp = {Sun, 02 Jun 2019 21:17:34 +0200},
biburl = {https://dblp.org/rec/bib/conf/mkm/BarrasGHRTWW13},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/mkm/2013,
editor = {Jacques Carette and
David Aspinall and
Christoph Lange and
Petr Sojka and
Wolfgang Windsteiger},
title = {Intelligent Computer Mathematics - MKM, Calculemus, DML, and Systems
and Projects 2013, Held as Part of {CICM} 2013, Bath, UK, July 8-12,
2013. Proceedings},
series = {Lecture Notes in Computer Science},
volume = {7961},
publisher = {Springer},
year = {2013},
url = {https://doi.org/10.1007/978-3-642-39320-4},
deactivated_doi = {10.1007/978-3-642-39320-4},
isbn = {978-3-642-39319-8},
timestamp = {Sun, 02 Jun 2019 21:17:34 +0200},
biburl = {https://dblp.org/rec/bib/conf/mkm/2013},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/afp/LammichW19,
author = {Peter Lammich and
Simon Wimmer},
title = {{IMP2} - Simple Program Verification in Isabelle/HOL},
journal = {Archive of Formal Proofs},
volume = {2019},
year = {2019},
url = {https://www.isa-afp.org/entries/IMP2.html},
timestamp = {Mon, 20 May 2019 11:45:07 +0200},
biburl = {https://dblp.org/rec/bib/journals/afp/LammichW19},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@misc{frama-c-home-page,
title = {The Frama-C Home Page},
author = {CEA LIST},
year = 2019,
month = jan,
day = 10,
url = {https://frama-c.com},
note = {Accessed \DTMdate{2019-03-24}}
}
@inproceedings{DBLP:conf/fm/LeinenbachS09,
author = {Dirk Leinenbach and Thomas Santen},
title = {Verifying the Microsoft Hyper-V Hypervisor with {VCC}},
deactivated_booktitle = {{FM} 2009: Formal Methods, Second World Congress, Eindhoven, The Netherlands,
November 2-6, 2009. Proceedings},
pages = {806--809},
year = {2009},
url = {https://doi.org/10.1007/978-3-642-05089-3_51},
deactivated_doi = {10.1007/978-3-642-05089-3_51},
timestamp = {Mon, 22 May 2017 17:11:19 +0200},
biburl = {https://dblp.org/rec/bib/conf/fm/LeinenbachS09},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/tap/Keller18,
author = {Chantal Keller},
title = {Tactic Program-Based Testing and Bounded Verification in Isabelle/HOL},
deactivated_booktitle = {Tests and Proofs - 12th International Conference, {TAP} 2018, Held
as Part of {STAF} 2018, Toulouse, France, June 27-29, 2018, Proceedings},
pages = {103--119},
year = {2018},
url = {https://doi.org/10.1007/978-3-319-92994-1\_6},
deactivated_doi = {10.1007/978-3-319-92994-1\_6},
timestamp = {Mon, 18 Jun 2018 13:57:50 +0200},
biburl = {https://dblp.org/rec/bib/conf/tap/Keller18},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/itp/AissatVW16,
author = {Romain A{\"{\i}}ssat and
Fr{\'{e}}d{\'{e}}ric Voisin and
Burkhart Wolff},
title = {Infeasible Paths Elimination by Symbolic Execution Techniques - Proof
of Correctness and Preservation of Paths},
deactivated_booktitle = {Interactive Theorem Proving - 7th International Conference, {ITP}
2016, Nancy, France, August 22-25, 2016, Proceedings},
pages = {36--51},
year = {2016},
url = {https://doi.org/10.1007/978-3-319-43144-4\_3},
deactivated_doi = {10.1007/978-3-319-43144-4\_3},
timestamp = {Thu, 17 Aug 2017 16:22:01 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/AissatVW16},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/tocs/KleinAEMSKH14,
author = {Gerwin Klein and
June Andronick and
Kevin Elphinstone and
Toby C. Murray and
Thomas Sewell and
Rafal Kolanski and
Gernot Heiser},
title = {Comprehensive formal verification of an {OS} microkernel},
journal = {{ACM} Trans. Comput. Syst.},
volume = {32},
number = {1},
pages = {2:1--2:70},
year = {2014},
url = {http://doi.acm.org/10.1145/2560537},
deactivated_doi = {10.1145/2560537},
timestamp = {Tue, 03 Jan 2017 11:51:57 +0100},
biburl = {https://dblp.org/rec/bib/journals/tocs/KleinAEMSKH14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/pldi/GreenawayLAK14,
author = {David Greenaway and
Japheth Lim and
June Andronick and
Gerwin Klein},
title = {Don't sweat the small stuff: formal verification of {C} code without
the pain},
deactivated_booktitle = {{ACM} {SIGPLAN} Conference on Programming Language Design and Implementation,
{PLDI} '14, Edinburgh, United Kingdom - June 09 - 11, 2014},
pages = {429--439},
year = {2014},
url = {http://doi.acm.org/10.1145/2594291.2594296},
deactivated_doi = {10.1145/2594291.2594296},
timestamp = {Tue, 20 Dec 2016 10:12:01 +0100},
biburl = {https://dblp.org/rec/bib/conf/pldi/GreenawayLAK14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/mkm/BruckerACW18,
author = {Achim D. Brucker and
Idir A{\"{\i}}t{-}Sadoune and
Paolo Crisafulli and
Burkhart Wolff},
title = {Using the Isabelle Ontology Framework - Linking the Formal with the
Informal},
deactivated_booktitle = {Intelligent Computer Mathematics - 11th International Conference,
{CICM} 2018, Hagenberg, Austria, August 13-17, 2018, Proceedings},
pages = {23--38},
year = {2018},
url = {https://doi.org/10.1007/978-3-319-96812-4\_3},
deactivated_doi = {10.1007/978-3-319-96812-4\_3},
timestamp = {Sat, 11 Aug 2018 00:57:41 +0200},
biburl = {https://dblp.org/rec/bib/conf/mkm/BruckerACW18},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/afp/TuongW15,
author = {Fr{\'{e}}d{\'{e}}ric Tuong and
Burkhart Wolff},
title = {A Meta-Model for the Isabelle {API}},
journal = {Archive of Formal Proofs},
volume = {2015},
year = {2015},
url = {https://www.isa-afp.org/entries/Isabelle\_Meta\_Model.shtml},
timestamp = {Mon, 07 Jan 2019 11:16:33 +0100},
biburl = {https://dblp.org/rec/bib/journals/afp/TuongW15},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/tphol/WinwoodKSACN09,
author = {Simon Winwood and
Gerwin Klein and
Thomas Sewell and
June Andronick and
David Cock and
Michael Norrish},
title = {Mind the Gap},
deactivated_booktitle = {Theorem Proving in Higher Order Logics, 22nd International Conference,
TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings},
pages = {500--515},
year = {2009},
crossref = {DBLP:conf/tphol/2009},
url = {https://doi.org/10.1007/978-3-642-03359-9\_34},
deactivated_doi = {10.1007/978-3-642-03359-9\_34},
timestamp = {Fri, 02 Nov 2018 09:49:05 +0100},
biburl = {https://dblp.org/rec/bib/conf/tphol/WinwoodKSACN09},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/tphol/2009,
editor = {Stefan Berghofer and
Tobias Nipkow and
Christian Urban and
Makarius Wenzel},
title = {Theorem Proving in Higher Order Logics, 22nd International Conference,
TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings},
series = {Lecture Notes in Computer Science},
volume = {5674},
publisher = {Springer},
year = {2009},
url = {https://doi.org/10.1007/978-3-642-03359-9},
deactivated_doi = {10.1007/978-3-642-03359-9},
isbn = {978-3-642-03358-2},
timestamp = {Tue, 23 May 2017 01:12:08 +0200},
biburl = {https://dblp.org/rec/bib/conf/tphol/2009},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/afp/BruckerTW14,
author = {Achim D. Brucker and
Fr{\'{e}}d{\'{e}}ric Tuong and
Burkhart Wolff},
title = {Featherweight {OCL:} {A} Proposal for a Machine-Checked Formal Semantics
for {OCL} 2.5},
journal = {Archive of Formal Proofs},
volume = {2014},
year = {2014},
url = {https://www.isa-afp.org/entries/Featherweight\_OCL.shtml},
timestamp = {Mon, 07 Jan 2019 11:16:33 +0100},
biburl = {https://dblp.org/rec/bib/journals/afp/BruckerTW14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/tacas/SananZHZTL17,
author = {David San{\'{a}}n and
Yongwang Zhao and
Zhe Hou and
Fuyuan Zhang and
Alwen Tiu and
Yang Liu},
title = {CSimpl: {A} Rely-Guarantee-Based Framework for Verifying Concurrent
Programs},
deactivated_booktitle = {Tools and Algorithms for the Construction and Analysis of Systems
- 23rd International Conference, {TACAS} 2017, Held as Part of the
European Joint Conferences on Theory and Practice of Software, {ETAPS}
2017, Uppsala, Sweden, April 22-29, 2017, Proceedings, Part {I}},
pages = {481--498},
year = {2017},
crossref = {DBLP:conf/tacas/2017-1},
url = {https://doi.org/10.1007/978-3-662-54577-5\_28},
deactivated_doi = {10.1007/978-3-662-54577-5\_28},
timestamp = {Mon, 18 Sep 2017 08:38:37 +0200},
biburl = {https://dblp.org/rec/bib/conf/tacas/SananZHZTL17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/tacas/2017-1,
editor = {Axel Legay and
Tiziana Margaria},
title = {Tools and Algorithms for the Construction and Analysis of Systems
- 23rd International Conference, {TACAS} 2017, Held as Part of the
European Joint Conferences on Theory and Practice of Software, {ETAPS}
2017, Uppsala, Sweden, April 22-29, 2017, Proceedings, Part {I}},
series = {Lecture Notes in Computer Science},
volume = {10205},
year = {2017},
url = {https://doi.org/10.1007/978-3-662-54577-5},
deactivated_doi = {10.1007/978-3-662-54577-5},
isbn = {978-3-662-54576-8},
timestamp = {Wed, 24 May 2017 08:28:32 +0200},
biburl = {https://dblp.org/rec/bib/conf/tacas/2017-1},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@inproceedings{DBLP:conf/itp/HouSTL17,
author = {Zhe Hou and
David San{\'{a}}n and
Alwen Tiu and
Yang Liu},
title = {Proof Tactics for Assertions in Separation Logic},
deactivated_booktitle = {Interactive Theorem Proving - 8th International Conference, {ITP}
2017, Bras{\'{\i}}lia, Brazil, September 26-29, 2017, Proceedings},
pages = {285--303},
year = {2017},
crossref = {DBLP:conf/itp/2017},
url = {https://doi.org/10.1007/978-3-319-66107-0\_19},
deactivated_doi = {10.1007/978-3-319-66107-0\_19},
timestamp = {Mon, 18 Sep 2017 08:38:37 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/HouSTL17},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@proceedings{DBLP:conf/itp/2017,
editor = {Mauricio Ayala{-}Rinc{\'{o}}n and
C{\'{e}}sar A. Mu{\~{n}}oz},
title = {Interactive Theorem Proving - 8th International Conference, {ITP}
2017, Bras{\'{\i}}lia, Brazil, September 26-29, 2017, Proceedings},
series = {Lecture Notes in Computer Science},
volume = {10499},
publisher = {Springer},
year = {2017},
url = {https://doi.org/10.1007/978-3-319-66107-0},
deactivated_doi = {10.1007/978-3-319-66107-0},
isbn = {978-3-319-66106-3},
timestamp = {Wed, 06 Sep 2017 14:53:52 +0200},
biburl = {https://dblp.org/rec/bib/conf/itp/2017},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/sigbed/CarrascosaCMBC14,
author = {E. Carrascosa and
Javier Coronel and
Miguel Masmano and
Patricia Balbastre and
Alfons Crespo},
title = {XtratuM hypervisor redesign for {LEON4} multicore processor},
journal = {{SIGBED} Review},
volume = {11},
number = {2},
pages = {27--31},
year = {2014},
url = {https://doi.org/10.1145/2668138.2668142},
deactivated_doi = {10.1145/2668138.2668142},
timestamp = {Tue, 06 Nov 2018 12:51:31 +0100},
biburl = {https://dblp.org/rec/bib/journals/sigbed/CarrascosaCMBC14},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/cacm/Earley70,
author = {Jay Earley},
title = {An Efficient Context-Free Parsing Algorithm},
journal = {Commun. {ACM}},
volume = {13},
number = {2},
pages = {94--102},
year = {1970},
url = {https://doi.org/10.1145/362007.362035},
deactivated_doi = {10.1145/362007.362035},
timestamp = {Wed, 14 Nov 2018 10:22:30 +0100},
biburl = {https://dblp.org/rec/bib/journals/cacm/Earley70},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
@article{DBLP:journals/jfp/Hutton92,
author = {Graham Hutton},
title = {Higher-Order Functions for Parsing},
journal = {J. Funct. Program.},
volume = {2},
number = {3},
pages = {323--343},
year = {1992},
url = {https://doi.org/10.1017/S0956796800000411},
deactivated_doi = {10.1017/S0956796800000411},
timestamp = {Sat, 27 May 2017 14:24:34 +0200},
biburl = {https://dblp.org/rec/bib/journals/jfp/Hutton92},
bibsource = {dblp computer science bibliography, https://dblp.org}
}

View File

@@ -1,9 +0,0 @@
template-beamerposter-UNSUPPORTED
template-beamer-UNSUPPORTED
template-lipics-v2021-UNSUPPORTED
template-lncs
template-scrartcl
template-scrreprt
template-scrreprt-modern
template-sn-article-UNSUPPORTED
template-svjour3-UNSUPPORTED

View File

@@ -1,9 +0,0 @@
session "template-beamer-UNSUPPORTED" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-beamer-UNSUPPORTED"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,72 +0,0 @@
(*<*)
theory
"template-beamer-UNSUPPORTED"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "beamer-UNSUPPORTED"
list_ontologies
use_ontology "scholarly_paper"
(*>*)
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
(*
author*[bob, email = "\<open>bob@example.com\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
*)
text\<open>
\begin{frame}
\frametitle{Example Slide}
\centering\huge This is an example!
\end{frame}
\<close>
frame*[test_frame
, frametitle = \<open>\<open>\<open>Example Slide\<^sub>t\<^sub>e\<^sub>s\<^sub>t\<close> with items @{thm "HOL.refl"}\<close>\<close>
, framesubtitle = "''Subtitle''"]
\<open>This is an example!
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> and the term encoding the title of this frame is \<^term_>\<open>frametitle @{frame \<open>test_frame\<close>}\<close>\<close>
frame*[test_frame2
, frametitle = "''Example Slide''"
, framesubtitle = \<open>\<open>\<open>Subtitle\<^sub>t\<^sub>e\<^sub>s\<^sub>t:\<close> the value of \<^term>\<open>(3::int) + 3\<close> is @{value "(3::int) + 3"}\<close>\<close>]
\<open>Test frame env \<^term>\<open>refl\<close>\<close>
frame*[test_frame3
, options = "''allowframebreaks''"
, frametitle = "''Example Slide with frame break''"
, framesubtitle = \<open>\<open>\<open>Subtitle\<^sub>t\<^sub>e\<^sub>s\<^sub>t:\<close> the value of \<^term>\<open>(3::int) + 3\<close> is @{value "(3::int) + 3"}\<close>\<close>]
\<open>
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> and the term encoding the title of this frame is \<^term_>\<open>frametitle @{frame \<open>test_frame3\<close>}\<close>
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<^item> The term \<^term>\<open>refl\<close> is...
\<close>
(*<*)
end
(*>*)

View File

@@ -1,9 +0,0 @@
session "template-beamerposter-UNSUPPORTED" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-beamerposter-UNSUPPORTED"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-beamerposter-UNSUPPORTED"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "beamerposter-UNSUPPORTED"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@@ -1,10 +0,0 @@
session "template-lipics-v2021-UNSUPPORTED" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-lipics-v2021-UNSUPPORTED"
document_files
"preamble.tex"
"lipics-v2021.cls"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-lipics-v2021-UNSUPPORTED"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "lipics-v2021-UNSUPPORTED"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@@ -1,9 +0,0 @@
session "template-lncs" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-lncs"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-lncs"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "lncs"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@@ -1,9 +0,0 @@
session "template-scrartcl" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-scrartcl"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-scrartcl"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "scrartcl"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@@ -1,9 +0,0 @@
session "template-scrreprt-modern" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-scrreprt-modern"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-scrreprt-modern"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.technical_report
begin
list_templates
use_template "scrreprt-modern"
list_ontologies
use_ontology "technical_report"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@@ -1,9 +0,0 @@
session "template-scrreprt" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-scrreprt"
document_files
"preamble.tex"

View File

@@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@@ -1,21 +0,0 @@
theory
"template-scrreprt"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.technical_report
begin
list_templates
use_template "scrreprt"
list_ontologies
use_ontology "technical_report"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@ -1,10 +0,0 @@
session "template-sn-article-UNSUPPORTED" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-sn-article-UNSUPPORTED"
document_files
"preamble.tex"
"sn-jnl.cls"

View File

@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@ -1,21 +0,0 @@
theory
"template-sn-article-UNSUPPORTED"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "sn-article-UNSUPPORTED"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@ -1,11 +0,0 @@
session "template-svjour3-UNSUPPORTED" = "Isabelle_DOF-Ontologies" +
options [document = pdf, document_output = "output", document_build = dof]
(*theories [document = false]
A
B*)
theories
"template-svjour3-UNSUPPORTED"
document_files
"preamble.tex"
"svjour3.cls"
"svglov3.clo"

View File

@ -1 +0,0 @@
%% This is a placeholder for user-specific configuration and packages.

View File

@ -1,101 +0,0 @@
% SVJour3 DOCUMENT CLASS OPTION SVGLOV3 -- for standardised journals
%
% This is an enhancement for the LaTeX
% SVJour3 document class for Springer journals
%
%%
%%
%% \CharacterTable
%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z
%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z
%% Digits \0\1\2\3\4\5\6\7\8\9
%% Exclamation \! Double quote \" Hash (number) \#
%% Dollar \$ Percent \% Ampersand \&
%% Acute accent \' Left paren \( Right paren \)
%% Asterisk \* Plus \+ Comma \,
%% Minus \- Point \. Solidus \/
%% Colon \: Semicolon \; Less than \<
%% Equals \= Greater than \> Question mark \?
%% Commercial at \@ Left bracket \[ Backslash \\
%% Right bracket \] Circumflex \^ Underscore \_
%% Grave accent \` Left brace \{ Vertical bar \|
%% Right brace \} Tilde \~}
\ProvidesFile{svglov3.clo}
[2006/02/03 v3.1
style option for standardised journals]
\typeout{SVJour Class option: svglov3.clo for standardised journals}
\def\validfor{svjour3}
\ExecuteOptions{final,10pt,runningheads}
% No size changing allowed, hence a "copy" of size10.clo is included
\renewcommand\normalsize{%
\if@twocolumn
\@setfontsize\normalsize\@xpt{12.5pt}%
\else
\if@smallext
\@setfontsize\normalsize\@xpt\@xiipt
\else
\@setfontsize\normalsize{9.5pt}{11.5pt}%
\fi
\fi
\abovedisplayskip=3 mm plus6pt minus 4pt
\belowdisplayskip=3 mm plus6pt minus 4pt
\abovedisplayshortskip=0.0 mm plus6pt
\belowdisplayshortskip=2 mm plus4pt minus 4pt
\let\@listi\@listI}
\normalsize
\newcommand\small{%
\if@twocolumn
\@setfontsize\small{8.5pt}\@xpt
\else
\if@smallext
\@setfontsize\small\@viiipt{9.5pt}%
\else
\@setfontsize\small\@viiipt{9.25pt}%
\fi
\fi
\abovedisplayskip 8.5\p@ \@plus3\p@ \@minus4\p@
\abovedisplayshortskip \z@ \@plus2\p@
\belowdisplayshortskip 4\p@ \@plus2\p@ \@minus2\p@
\def\@listi{\leftmargin\leftmargini
\parsep 0\p@ \@plus1\p@ \@minus\p@
\topsep 4\p@ \@plus2\p@ \@minus4\p@
\itemsep0\p@}%
\belowdisplayskip \abovedisplayskip
}
\let\footnotesize\small
\newcommand\scriptsize{\@setfontsize\scriptsize\@viipt\@viiipt}
\newcommand\tiny{\@setfontsize\tiny\@vpt\@vipt}
\if@twocolumn
\newcommand\large{\@setfontsize\large\@xiipt\@xivpt}
\newcommand\LARGE{\@setfontsize\LARGE{16pt}{18pt}}
\else
\newcommand\large{\@setfontsize\large\@xipt\@xiipt}
\newcommand\LARGE{\@setfontsize\LARGE{13pt}{15pt}}
\fi
\newcommand\Large{\@setfontsize\Large\@xivpt{16dd}}
\newcommand\huge{\@setfontsize\huge\@xxpt{25}}
\newcommand\Huge{\@setfontsize\Huge\@xxvpt{30}}
%
\def\runheadhook{\rlap{\smash{\lower6.5pt\hbox to\textwidth{\hrulefill}}}}
\if@twocolumn
\setlength{\textwidth}{17.4cm}
\setlength{\textheight}{234mm}
\AtEndOfClass{\setlength\columnsep{6mm}}
\else
\if@smallext
\setlength{\textwidth}{11.9cm}
\setlength{\textheight}{19.4cm}
\else
\setlength{\textwidth}{12.2cm}
\setlength{\textheight}{19.8cm}
\fi
\fi
%
\AtBeginDocument{%
\@ifundefined{@journalname}
{\typeout{Unknown journal: specify \string\journalname\string{%
<name of your journal>\string} in preamble^^J}}{}}
%
\endinput
%%
%% End of file `svglov3.clo'.

View File

@ -1,21 +0,0 @@
theory
"template-svjour3-UNSUPPORTED"
imports
"Isabelle_DOF-Ontologies.document_templates"
Isabelle_DOF.scholarly_paper
begin
list_templates
use_template "svjour3-UNSUPPORTED"
list_ontologies
use_ontology "scholarly_paper"
title* [tit::title]\<open>Formal Verification of Security Protocols\<close>
author*[alice, email = "\<open>alice@example.com\<close>",
http_site = "\<open>https://example.com/alice\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Alice\<close>
author*[bob, email = "\<open>bob@example.com\<close>",
http_site = "\<open>https://example.com/bob\<close>",
affiliation = "\<open>Wonderland University\<close>"]\<open>Bob\<close>
end

View File

@ -1,466 +0,0 @@
(*************************************************************************
* Copyright (C)
* 2019-2022 The University of Exeter
* 2019-2022 The University of Paris-Saclay
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
chapter\<open>Common Criteria\<close>
section\<open>Terminology\<close>
(*<<*)
theory CC_terminology
imports
"Isabelle_DOF.technical_report"
begin
define_ontology "DOF-CC_terminology.sty" "CC"
(*>>*)
text\<open>We re-use the class \<^typ>\<open>math_content\<close>, which also provides a framework for
semi-formal terminology; the following definition extends it for our purposes.\<close>
doc_class concept_definition = math_content +
status :: status <= "semiformal"
mcc :: math_content_class <= "terminology"
tag :: string
short_tag :: "string option" <= "None"
text\<open>The \<^verbatim>\<open>short_tag\<close>, if set, is used in the presentation directly.\<close>
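(* For instance, the ''security requirement'' definition below sets
   short_tag = "Some(''SR'')", so the abbreviation is used in the PDF
   instead of the full tag. *)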
type_synonym concept = concept_definition
declare[[ Definition_default_class="concept_definition"]]
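(* With this default, each Definition* command below creates an instance of
   concept_definition. declare_reference* declares an identifier that can be
   referenced via @{docitem (unchecked) ...} before it is defined; the empty
   Definition* items at the end of this theory resolve these forward
   references. *)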
subsection \<open>Terminology\<close>
subsubsection \<open>Terms and definitions common in the CC\<close>
Definition* [aas_def, tag= "''adverse actions''"]
\<open>actions performed by a threat agent on an asset\<close>
declare_reference*[toeDef]
Definition* [assts_def, tag="''assets''"]
\<open>entities that the owner of the @{docitem (unchecked) toeDef} presumably places value upon \<close>
Definition* [asgn_def, tag="''assignment''"]
\<open>the specification of an identified parameter in a component (of the CC) or requirement.\<close>
declare_reference*[sfrs_def]
Definition* [assrc_def, tag="''assurance''"]
\<open>grounds for confidence that a @{docitem (unchecked) toeDef}
meets the @{docitem (unchecked) sfrs_def}\<close>
Definition* [attptl_def, tag="''attack potential''"]
\<open>measure of the effort to be expended in attacking a TOE, expressed in terms of
an attacker's expertise, resources and motivation\<close>
Definition* [argmt_def, tag= "''augmentation''"]
\<open>addition of one or more requirement(s) to a package\<close>
Definition* [authdata_def, tag="''authentication data''"]
\<open>information used to verify the claimed identity of a user\<close>
Definition* [authusr_def, tag = "''authorised user''"]
\<open>@{docitem (unchecked) toeDef} user who may,
in accordance with the @{docitem (unchecked) sfrs_def}, perform an operation\<close>
Definition* [bppDef, tag="''Base Protection Profile''"]
\<open>Protection Profile used as a basis to build a Protection Profile Configuration\<close>
Definition* [cls_def,tag="''class''"]
\<open>set of CC families that share a common focus\<close>
Definition* [cohrnt_def,tag="''coherent''"]
\<open>logically ordered and having discernible meaning. For documentation, this addresses
both the actual text and the structure of the document, in terms of whether it is
understandable by its target audience.\<close>
Definition* [cmplt_def, tag="''complete''"]
\<open>property where all necessary parts of an entity have been provided.
In terms of documentation, this means that all relevant information is
covered in the documentation, at such a level of detail that no further
explanation is required at that level of abstraction.\<close>
Definition* [compnt_def, tag="''component''"]
\<open>smallest selectable set of elements on which requirements may be based\<close>
Definition*[cap_def, tag="''composed assurance package''"]
\<open>assurance package consisting of requirements drawn from CC Part 3
(predominately from the ACO class), representing a point on the CC predefined
composition assurance scale\<close>
Definition* [cfrm_def,tag="''confirm''"]
\<open>declare that something has been reviewed in detail with an independent determination
of sufficiency.
The level of rigour required depends on the nature of the subject matter. This
term is only applied to evaluator actions.\<close>
Definition* [cnnctvty_def, tag="''connectivity''"]
\<open>property of the @{docitem (unchecked) toeDef} allowing interaction with IT entities external to the
@{docitem (unchecked) toeDef}.
This includes exchange of data by wire or by wireless means, over any
distance in any environment or configuration.\<close>
Definition* [cnstnt_def, tag="''consistent''"]
\<open>relationship between two or more entities such that there are no apparent
contradictions between these entities\<close>
Definition* [cnt_vrb_def, tag="''counter, verb''"]
\<open>meet an attack where the impact of a particular threat is mitigated
but not necessarily eradicated\<close>
declare_reference*[stDef]
declare_reference*[ppDef]
Definition* [dmnst_conf_def, tag="''demonstrable conformance''"]
\<open>relation between an @{docitem (unchecked) stDef} and a @{docitem (unchecked) ppDef},
where the @{docitem (unchecked) stDef}
provides a solution which solves the generic security problem in the PP.
The @{docitem (unchecked) ppDef} and the @{docitem (unchecked) stDef} may contain
entirely different statements that discuss
different entities, use different concepts etc. Demonstrable conformance is
also suitable for a @{docitem (unchecked) toeDef} type
where several similar @{docitem (unchecked) ppDef}s already exist, thus
allowing the ST author to claim conformance to these @{docitem (unchecked) ppDef}s simultaneously,
thereby saving work.\<close>
Definition* [dmstrt_def, tag="''demonstrate''"]
\<open>provide a conclusion gained by an analysis which is less rigorous than a “proof”\<close>
Definition* [dpndcy, tag="''dependency''"]
\<open>relationship between components such that if a requirement based on the depending
component is included in a @{docitem (unchecked) ppDef}, ST or package, a requirement based on
the component that is depended upon must normally also be included
in the @{docitem (unchecked) ppDef},
@{docitem (unchecked) stDef} or package\<close>
Definition* [dscrb_def, tag="''describe''"]
\<open>provide specific details of an entity\<close>
Definition* [dtrmn_def, tag="''determine''"]
\<open>affirm a particular conclusion based on independent analysis with the objective
of reaching a particular conclusion.
The usage of this term implies a truly independent analysis, usually in the
absence of any previous analysis having been performed. Compare with the
terms “confirm” or “verify” which imply that an analysis has already been
performed which needs to be reviewed.
Definition* [devenv_def, tag="''development environment''"]
\<open>environment in which the @{docitem (unchecked) toeDef} is developed\<close>
Definition* [elmnt_def, tag="''element''"]
\<open>indivisible statement of a security need\<close>
Definition* [ensr_def, tag="''ensure''"]
\<open>guarantee a strong causal relationship between an action and its consequences.
When this term is preceded by the word “help” it indicates that the
consequence is not fully certain, on the basis of that action alone.\<close>
Definition* [eval_def, tag="''evaluation''"]
\<open>assessment of a @{docitem (unchecked) ppDef}, an @{docitem (unchecked) stDef}
or a @{docitem (unchecked) toeDef}, against defined criteria.\<close>
Definition* [eal_def, tag= "''evaluation assurance level''"]
\<open>set of assurance requirements drawn from CC Part 3, representing a point on the
CC predefined assurance scale, that form an assurance package\<close>
Definition* [eval_auth_def, tag="''evaluation authority''"]
\<open>body that sets the standards and monitors the quality of evaluations conducted
by bodies within a specific community and implements the CC for that community
by means of an evaluation scheme\<close>
Definition* [eval_schm_def, tag="''evaluation scheme''"]
\<open>administrative and regulatory framework under which the CC is applied by an
evaluation authority within a specific community\<close>
Definition* [exstDef, tag="''exhaustive''"]
\<open>characteristic of a methodical approach taken to perform an
analysis or activity according to an unambiguous plan.
This term is used in the CC with respect to conducting an analysis or other
activity. It is related to ``systematic'' but is considerably stronger, in that it
indicates not only that a methodical approach has been taken to perform the
analysis or activity according to an unambiguous plan, but that the plan that
was followed is sufficient to ensure that all possible avenues have been
exercised.\<close>
Definition* [expln_def, tag="''explain''"]
\<open>give an argument accounting for the reason for taking a course of action.
This term differs from both “describe” and “demonstrate”. It is intended to
answer the question “Why?” without actually attempting to argue that the
course of action that was taken was necessarily optimal.\<close>
Definition* [extn_def, tag= "''extension''"]
\<open>addition to an ST or PP of functional requirements not contained in CC
Part 2 and/or assurance requirements not contained in CC Part 3\<close>
Definition* [extnl_ent_def, tag="''external entity''"]
\<open>human or IT entity possibly interacting with the TOE from outside of the TOE boundary\<close>
Definition* [fmly_def, tag="''family''"]
\<open>set of components that share a similar goal but differ in emphasis or rigour\<close>
Definition* [fml_def, tag="''formal''"]
\<open>expressed in a restricted syntax language with defined semantics
based on well-established mathematical concepts \<close>
Definition* [gudn_doc_def, tag="''guidance documentation''"]
\<open>documentation that describes the delivery, preparation, operation,
management and/or use of the TOE\<close>
Definition* [ident_def, tag="''identity''"]
\<open>representation uniquely identifying entities (e.g. a user, a process or a disk)
within the context of the TOE.
An example of such a representation is a string. For a human user, the
representation can be the full or abbreviated name or a (still unique)
pseudonym.\<close>
Definition* [infml_def, tag="''informal''"]
\<open>expressed in natural language\<close>
Definition* [intr_tsf_trans_def, tag ="''inter TSF transfers''"]
\<open>communicating data between the TOE and the security functionality of
other trusted IT products\<close>
Definition* [intl_com_chan_def, tag ="''internal communication channel''"]
\<open>communication channel between separated parts of the TOE\<close>
Definition* [int_toe_trans, tag="''internal TOE transfer''"]
\<open>communicating data between separated parts of the TOE\<close>
Definition* [inter_consistDef, tag="''internally consistent''"]
\<open>no apparent contradictions exist between any aspects of an entity.
In terms of documentation, this means that there can be no statements within
the documentation that can be taken to contradict each other.\<close>
Definition* [iter_def, tag="''iteration''"]
\<open>use of the same component to express two or more distinct requirements\<close>
Definition* [jstfct_def, tag="''justification''"]
\<open>analysis leading to a conclusion. “Justification” is more rigorous than a demonstration.
This term requires significant rigour in terms of very carefully and thoroughly explaining every
step of a logical argument.\<close>
Definition* [objct_def, tag="''object''"]
\<open>passive entity in the TOE, that contains or receives information,
and upon which subjects perform operations\<close>
Definition* [op_cc_cmpnt_def, tag ="''operation (on a component of the CC)''"]
\<open>modification or repetition of a component.
Allowed operations on components are assignment, iteration, refinement and
selection.\<close>
Definition* [op_obj_def, tag= "''operation (on an object)''"]
\<open>specific type of action performed by a subject on an object\<close>
Definition* [op_env_def, tag= "''operational environment''"]
\<open>environment in which the TOE is operated\<close>
Definition* [org_sec_po_def, tag="''organisational security policy''"]
\<open>set of security rules, procedures, or guidelines for an organisation.
A policy may pertain to a specific operational environment.\<close>
Definition* [pckg_def, tag="''package''"]
\<open>named set of either security functional or security assurance requirements.
An example of a package is ``EAL 3''.\<close>
Definition* [pp_config_def, tag="''Protection Profile Configuration''"]
\<open>Protection Profile composed of Base Protection Profiles and Protection Profile Module\<close>
Definition* [pp_eval_def, tag="''Protection Profile evaluation''"]
\<open> assessment of a PP against defined criteria \<close>
Definition* [ppDef, tag="''Protection Profile''"]
\<open>implementation-independent statement of security needs for a TOE type\<close>
Definition* [ppm_def, tag="''Protection Profile Module''"]
\<open>implementation-independent statement of security needs for a TOE type
complementary to one or more Base Protection Profiles\<close>
declare_reference*[tsf_def]
Definition* [prv_def, tag="''prove''"]
\<open>show correspondence by formal analysis in its mathematical sense.
It is completely rigorous in all ways. Typically, “prove” is used when there is
a desire to show correspondence between two @{docitem (unchecked) tsf_def}
representations at a high level of rigour.\<close>
Definition* [ref_def, tag="''refinement''"]
\<open>addition of details to a component\<close>
Definition* [role_def, tag="''role''"]
\<open>predefined set of rules establishing the allowed interactions between
a user and the @{docitem (unchecked) toeDef}\<close>
declare_reference*[sfp_def]
Definition* [scrt_def, tag="''secret''"]
\<open>information that must be known only to authorised users and/or the
@{docitem (unchecked) tsf_def} in order to enforce a specific @{docitem (unchecked) sfp_def}\<close>
declare_reference*[sfr_def]
Definition* [sec_stDef, tag="''secure state''"]
\<open>state in which the @{docitem (unchecked) tsf_def} data are consistent
and the @{docitem (unchecked) tsf_def}
continues correct enforcement of the @{docitem (unchecked) sfr_def}s\<close>
Definition* [sec_att_def, tag="''security attribute''"]
\<open>property of subjects, users (including external IT products), objects,
information, sessions and/or resources that is used in defining the @{docitem (unchecked) sfr_def}s
and whose values are used in enforcing the @{docitem (unchecked) sfr_def}s\<close>
Definition* [sec_def, tag="''security''"]
\<open>function policy set of rules describing specific security behaviour enforced
by the @{docitem (unchecked) tsf_def} and expressible as a set of @{docitem (unchecked) sfr_def}s\<close>
Definition* [sec_obj_def, tag="''security objective''"]
\<open>statement of an intent to counter identified threats and/or satisfy identified
organisation security policies and/or assumptions\<close>
Definition* [sec_prob_def, tag ="''security problem''"]
\<open>statement which in a formal manner defines the nature and scope of the security that
the TOE is intended to address. This statement consists of a combination of:
\begin{itemize}
\item threats to be countered by the TOE and its operational environment,
\item the OSPs enforced by the TOE and its operational environment, and
\item the assumptions that are upheld for the operational environment of the TOE.
\end{itemize}\<close>
Definition* [sr_def, tag="''security requirement''", short_tag="Some(''SR'')"]
\<open>requirement, stated in a standardised language, which is meant to contribute
to achieving the security objectives for a TOE\<close>
(*<*)
text \<open>@{docitem (unchecked) toeDef}\<close>
(*>*)
Definition* [st, tag="''Security Target''", short_tag="Some(''ST'')"]
\<open>implementation-dependent statement of security needs for a specific identified
@{docitem (unchecked) toeDef}\<close>
Definition* [slct_def, tag="''selection''"]
\<open>specification of one or more items from a list in a component\<close>
Definition* [smfrml_def, tag="''semiformal''"]
\<open>expressed in a restricted syntax language with defined semantics\<close>
Definition* [spcfy_def, tag= "''specify''"]
\<open>provide specific details about an entity in a rigorous and precise manner\<close>
Definition* [strct_conf_def, tag="''strict conformance''"]
\<open>hierarchical relationship between a PP and an ST where all the requirements in the
PP also exist in the ST.
This relation can be roughly defined as “the ST shall contain all statements
that are in the PP, but may contain more”. Strict conformance is expected to
be used for stringent requirements that are to be adhered to in a single
manner. \<close>
Definition* [st_eval_def, tag="''ST evaluation''"]
\<open>assessment of an ST against defined criteria\<close>
Definition* [subj_def, tag="''subject''"]
\<open>active entity in the TOE that performs operations on objects\<close>
Definition* [toe, tag= "''target of evaluation''"]
\<open>set of software, firmware and/or hardware possibly accompanied by guidance\<close>
Definition* [thrt_agnt_def, tag="''threat agent''"]
\<open>entity that can adversely act on assets\<close>
Definition* [toe_eval_def, tag="''TOE evaluation''"]
\<open>assessment of a TOE against defined criteria\<close>
Definition* [toe_res_def, tag="''TOE resource''"]
\<open>anything useable or consumable in the TOE\<close>
Definition* [toe_sf_def, tag="''TOE security functionality''", short_tag= "Some(''TSF'')"]
\<open>combined functionality of all hardware, software, and firmware of a TOE that must be relied upon
for the correct enforcement of the @{docitem (unchecked) sfr_def}s\<close>
Definition* [tr_vrb_def, tag="''trace, verb''"]
\<open>perform an informal correspondence analysis between two entities with only a
minimal level of rigour\<close>
Definition* [trnsfs_out_toeDef, tag="''transfers outside of the TOE''"]
\<open>TSF mediated communication of data to entities not under the control of the TSF\<close>
Definition* [transl_def, tag= "''translation''"]
\<open>describes the process of describing security requirements in a
standardised language.
The use of the term translation in this context is not literal and does not imply
that every SFR expressed in standardised language can also be translated
back to the security objectives.\<close>
Definition* [trst_chan_def, tag="''trusted channel''"]
\<open>a means by which a TSF and another trusted IT product
can communicate with necessary confidence\<close>
Definition* [trst_it_prod_def, tag="''trusted IT product''"]
\<open>IT product, other than the TOE, which has its security functional requirements administratively coordinated with the TOE
and which is assumed to enforce its security functional requirements correctly.
An example of a trusted IT product would be one that has been separately
evaluated.\<close>
Definition* [trst_path_def, tag="''trusted path''"]
\<open>means by which a user and a TSF can communicate with the necessary confidence\<close>
Definition* [tsf_data_def, tag="''TSF data''"]
\<open>data for the operation of the TOE upon which the enforcement of the SFR relies\<close>
Definition* [tsf_intrfc_def, tag="''TSF interface''"]
\<open>means by which external entities (or subjects in the TOE but outside of the TSF)
supply data to the TSF, receive data from the TSF and invoke services from the TSF\<close>
Definition* [usr_def, tag="''user''"] \<open>see external entity\<close>
Definition* [usr_datat_def, tag="''user data''"]
\<open>data for the user, that does not affect the operation of the TSF\<close>
Definition* [vrfy_def, tag="''verify''"]
\<open>rigorously review in detail with an independent determination of
sufficiency.
Also see “confirm”. This term has more rigorous connotations. The term
“verify” is used in the context of evaluator actions where an independent
effort is required of the evaluator.\<close>
Definition* [dev_def, tag="''Developer''"]
\<open>who respond to actual or perceived consumer security requirements in
constructing a @{docitem (unchecked) toeDef}, reference this CC\_Part\_3
when interpreting statements of assurance requirements and determining
assurance approaches of @{docitem toe}s.\<close>
Definition*[evalu_def, tag="'' Evaluator''"]
\<open>who use the assurance requirements defined in CC\_Part\_3
as mandatory statement of evaluation criteria when determining the assurance
of @{docitem (unchecked) toeDef}s and when evaluating @{docitem ppDef}s
and @{docitem (unchecked) stDef}s.\<close>
Definition*[toeDef] \<open>\<close>
Definition*[sfrs_def] \<open>\<close>
Definition*[sfr_def] \<open>\<close>
Definition*[stDef] \<open>\<close>
Definition*[sfp_def] \<open>\<close>
Definition*[tsf_def] \<open>\<close>
end

View File

@ -1,175 +0,0 @@
(*************************************************************************
* Copyright (C)
* 2019-2022 The University of Exeter
* 2019-2022 The University of Paris-Saclay
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
section\<open>CC 3.1.R5\<close>
(*<*)
theory "CC_v3_1_R5"
imports
"Isabelle_DOF.technical_report"
"CC_terminology"
begin
(*>*)
subsection \<open>General Infrastructure on CC Evaluations\<close>
datatype EALs = EAL1 | EAL2 | EAL3 | EAL4 | EAL5 | EAL6 | EAL7
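(* The seven CC evaluation assurance levels, from EAL1 (lowest) to EAL7
   (highest). *)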
doc_class CC_structure_element = (* text_element + *)
tag_id :: string
eval_level :: EALs
doc_class CC_text_element = text_element +
eval_level :: EALs
subsection \<open>Security target ontology\<close>
doc_class st_ref_cls = CC_text_element +
title :: string
st_version:: "(int \<times> int \<times> int)"
"authors":: "author list"
"st_date" :: string
doc_class toe_ref_cls = CC_text_element +
dev_name:: string
toe_name:: string
toe_version:: "(int \<times> int \<times> int)"
prod_name::"string option" <= None
doc_class toe_ovrw_cls = CC_text_element +
toe_type :: string
software_req :: "CC_text_element list" <= "[]"
hardware_req :: "CC_text_element list" <= "[]"
firmware_req :: "CC_text_element list" <= "[]"
features_req :: "CC_text_element list" <= "[]"
invariant eal_consistency::
"(case eval_level \<sigma> of
EAL1 \<Rightarrow> software_req \<sigma> \<noteq> []
| EAL2 \<Rightarrow> software_req \<sigma> \<noteq> []
| EAL3 \<Rightarrow> software_req \<sigma> \<noteq> []
| EAL4 \<Rightarrow> software_req \<sigma> \<noteq> []
| _ \<Rightarrow> undefined)"
thm eal_consistency_inv_def
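(* The invariant above demands a non-empty software requirements list for
   the levels EAL1-EAL4 and is left undefined for the higher levels; the
   thm command displays the fact generated from the invariant declaration. *)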
doc_class toe_desc_cls = CC_text_element +
software_list :: "CC_text_element list" <= "[]"
hardware_list :: "CC_text_element list" <= "[]"
firmware_list :: "CC_text_element list" <= "[]"
sec_features_list:: "CC_text_element list" <= "[]"
doc_class ST_INTRO_MNT = CC_structure_element +
tag_id:: string
accepts "\<lbrace>st_ref_cls\<rbrace>\<^sup>* ~~ \<lbrace>toe_ref_cls\<rbrace>\<^sup>* ~~ \<lbrace>toe_ovrw_cls\<rbrace>\<^sup>* ~~ \<lbrace>toe_desc_cls\<rbrace>\<^sup>*"
doc_class cc_conf_claim_cls = CC_text_element +
cc_version:: string
ext_srs_list::"CC_text_element list option"
doc_class pp_clms_cls = CC_text_element +
pp_pckgs_list::"CC_text_element list option"
pp_config_list::"CC_text_element list option"
doc_class pckg_claim_cls = CC_text_element +
pckgs_list::"CC_text_element list option"
doc_class conf_ratio =
pp_config_list::"CC_text_element list option"
doc_class CONF_CLAIMS_MNT = CC_structure_element +
tag_id:: string
accepts "(\<lbrace>cc_conf_claim_cls\<rbrace>\<^sup>+ ~~ \<lbrace>pp_clms_cls\<rbrace>\<^sup>* ~~ \<lbrace>pckg_claim_cls\<rbrace>\<^sup>+ ~~ \<lbrace>conf_ratio\<rbrace>\<^sup>*)"
doc_class threats_cls = CC_text_element +
toe_thrts_list::"CC_text_element list option"
env_thrts_list::"CC_text_element list option"
thrt_agnts_list:: "CC_text_element list option"
advrt_acts_list:: "CC_text_element list option"
assts_list:: "CC_text_element list option"
doc_class osps_cls = CC_text_element +
toe_osps_list::"CC_text_element list option"
env_osps_list::"CC_text_element list option"
doc_class assumptions_cls = CC_text_element +
assms_phy_list::"CC_text_element list option"
assms_prsnl_list::"CC_text_element list option"
assms_cnct_list::"CC_text_element list option"
doc_class SEC_PROB_DEF_MNT = CC_structure_element +
tag_id:: string
accepts "((\<lbrace>threats_cls\<rbrace>\<^sup>+ || \<lbrace>osps_cls\<rbrace>\<^sup>+) ~~ \<lbrace>assumptions_cls\<rbrace>\<^sup>+)"
doc_class toe_sec_obj_cls = CC_text_element +
toe_obj_list:: "CC_text_element list"
doc_class env_sec_obj_cls = CC_text_element +
env_goals_list:: "CC_text_element list"
env_sites_list :: "CC_text_element list"
doc_class sec_obj_ratio =
toe_thrts_obj_trace::"((threats_cls \<times> toe_sec_obj_cls) list) option"
toe_osps_obj_trace::"((osps_cls \<times> toe_sec_obj_cls) list) option"
toe_assms_obj_trace::"((assumptions_cls \<times> toe_sec_obj_cls) list) option"
env_thrts_obj_trace::"((threats_cls \<times> toe_sec_obj_cls) list) option"
env_osps_obj_trace::"((osps_cls \<times> toe_sec_obj_cls) list) option"
env_assms_obj_trace::"((assumptions_cls \<times> toe_sec_obj_cls) list) option"
toe_thrts_just_list::"(CC_text_element list) option"
toe_osps_just_list::"(CC_text_element list) option"
toe_assms_just_list::"CC_text_element list"
env_thrts_just_list::"(CC_text_element list) option"
env_osps_just_list::"(CC_text_element list) option"
env_assms_just_list::"CC_text_element list"
doc_class ext_comp_def =
ext_comp_list::"(CC_text_element list) option"
doc_class SEC_OBJ_MNT = CC_structure_element +
tag_id:: string
accepts "(\<lbrace>toe_sec_obj_cls\<rbrace>\<^sup>+ ~~ \<lbrace>env_sec_obj_cls\<rbrace>\<^sup>+ ~~ \<lbrace>sec_obj_ratio\<rbrace>\<^sup>*~~ \<lbrace>ext_comp_def\<rbrace>\<^sup>*)"
doc_class sfrs_cls = CC_text_element +
sfrs_language::"string"
sfrs_operation::"CC_text_element"
sfrs_dependency::"CC_text_element list option"
doc_class sfrs_ratio_cls = CC_text_element +
toe_sec_obj_sfrs_trace:: "(sfrs_cls \<times> toe_sec_obj_cls) list"
toe_sec_obj_sfrs_just::"CC_text_element list option"
doc_class sars_cls = CC_text_element +
sars_language::"string"
sars_operation::"CC_text_element"
sars_dependency::"CC_text_element list option"
doc_class sars_ratio_cls = CC_text_element +
sars_explain::"CC_text_element list"
doc_class SEC_REQ_MNT =
spd_id:: string
accepts "(\<lbrace>sfrs_cls\<rbrace>\<^sup>+ ~~ \<lbrace>sfrs_ratio_cls\<rbrace>\<^sup>+ ~~ \<lbrace>sars_cls\<rbrace>\<^sup>+ ~~ \<lbrace>sars_ratio_cls\<rbrace>\<^sup>+)"
doc_class ST_MNT = CC_structure_element +
tag_id :: string
level :: EALs
accepts "(ST_INTRO_MNT ~~
CONF_CLAIMS_MNT ~~
SEC_PROB_DEF_MNT ~~
SEC_OBJ_MNT ~~
SEC_REQ_MNT)"
end

View File

@ -1,57 +0,0 @@
%% Copyright (C) University of Exeter
%% University of Paris-Saclay
%%
%% License:
%% This program can be redistributed and/or modified under the terms
%% of the LaTeX Project Public License Distributed from CTAN
%% archives in directory macros/latex/base/lppl.txt; either
%% version 1.3c of the License, or (at your option) any later version.
%% OR
%% The 2-clause BSD-style license.
%%
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
\NeedsTeXFormat{LaTeX2e}\relax
\ProvidesPackage{DOF-CC_terminology}
[00/00/0000 Document-Type Support Framework for Isabelle (CC).]
\RequirePackage{DOF-COL}
\usepackage{etex}
\ifdef{\reserveinserts}{\reserveinserts{28}}{}
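%% Isabelle/DOF encodes qualified attribute names in macro keys by spelling
%% out characters that are not allowed in LaTeX key names: "_" becomes
%% UNDERSCORE and "." becomes DOT (e.g., scholarly_paper.math_content.mcc
%% appears as scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc).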
\newkeycommand*{\mathcc}[label=,type=%
, scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTshortUNDERSCOREname ={}%
, scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc = %
, IsaUNDERSCORECOLDOTtextUNDERSCOREelementDOTlevel =%
, IsaUNDERSCORECOLDOTtextUNDERSCOREelementDOTreferentiable =%
, IsaUNDERSCORECOLDOTtextUNDERSCOREelementDOTvariants =%
, scholarlyUNDERSCOREpaperDOTtextUNDERSCOREsectionDOTmainUNDERSCOREauthor =%
, scholarlyUNDERSCOREpaperDOTtextUNDERSCOREsectionDOTfixmeUNDERSCORElist =%
, IsaUNDERSCORECOLDOTtextUNDERSCOREelementDOTlevel =%
, scholarlyUNDERSCOREpaperDOTtechnicalDOTdefinitionUNDERSCORElist =%
, scholarlyUNDERSCOREpaperDOTtechnicalDOTstatus =%
, CCUNDERSCOREterminologyDOTconceptUNDERSCOREdefinitionDOTtag=%
, CCUNDERSCOREterminologyDOTconceptUNDERSCOREdefinitionDOTshortUNDERSCOREtag=%
]
[1]
{%
\begin{isamarkuptext}%
\ifthenelse{\equal{\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTshortUNDERSCOREname}} {} }
{%
\begin{\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc}}\label{\commandkey{label}}
#1
\end{\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc}}
}{%
\begin{\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc}}[\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTshortUNDERSCOREname}]\label{\commandkey{label}}
#1
\end{\commandkey{scholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontentDOTmcc}}
}
\end{isamarkuptext}%
}
\expandafter\def\csname isaDofDOTtextDOTscholarlyUNDERSCOREpaperDOTmathUNDERSCOREcontent\endcsname{\mathcc}

File diff suppressed because it is too large

View File

@ -1,397 +0,0 @@
(*************************************************************************
* Copyright (C)
* 2019-2023 The University of Exeter
* 2018-2023 The University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<<*)
theory
CENELEC_50128_Documentation
imports
CENELEC_50128
begin
define_shortcut* dof \<rightleftharpoons> \<open>\dof\<close>
isadof \<rightleftharpoons> \<open>\isadof{}\<close>
define_shortcut* TeXLive \<rightleftharpoons> \<open>\TeXLive\<close>
BibTeX \<rightleftharpoons> \<open>\BibTeX{}\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
TeX \<rightleftharpoons> \<open>\TeX{}\<close>
pdf \<rightleftharpoons> \<open>PDF\<close>
ML\<open>
fun boxed_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_text
(fn ctxt => DOF_lib.string_2_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox")
val neant = K(Latex.text("",\<^here>))
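(* "neant" swallows its input and produces empty LaTeX output; it is kept
   only for debugging (see the commented-out occurrence below). *)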
fun boxed_theory_text_antiquotation name (* redefined in these more abstract terms *) =
DOF_lib.gen_text_antiquotation name DOF_lib.report_theory_text
(fn ctxt => DOF_lib.string_2_theory_text_antiquotation ctxt
#> DOF_lib.enclose_env false ctxt "isarbox"
(* #> neant *)) (*debugging *)
fun boxed_sml_text_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "sml")
(* the simplest conversion possible *)
fun boxed_pdf_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "out")
(* the simplest conversion possible *)
fun boxed_latex_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "ltx")
(* the simplest conversion possible *)
fun boxed_bash_antiquotation name =
DOF_lib.gen_text_antiquotation name (K(K()))
(fn ctxt => Input.source_content
#> Latex.text
#> DOF_lib.enclose_env true ctxt "bash")
(* the simplest conversion possible *)
\<close>
setup\<open>(* std_text_antiquotation \<^binding>\<open>my_text\<close> #> *)
boxed_text_antiquotation \<^binding>\<open>boxed_text\<close> #>
(* std_text_antiquotation \<^binding>\<open>my_cartouche\<close> #> *)
boxed_text_antiquotation \<^binding>\<open>boxed_cartouche\<close> #>
(* std_theory_text_antiquotation \<^binding>\<open>my_theory_text\<close>#> *)
boxed_theory_text_antiquotation \<^binding>\<open>boxed_theory_text\<close> #>
boxed_sml_text_antiquotation \<^binding>\<open>boxed_sml\<close> #>
boxed_pdf_antiquotation \<^binding>\<open>boxed_pdf\<close> #>
boxed_latex_antiquotation \<^binding>\<open>boxed_latex\<close>#>
boxed_bash_antiquotation \<^binding>\<open>boxed_bash\<close>
\<close>
(*>>*)
section*[cenelec_onto::example]\<open>Writing Certification Documents \<^boxed_theory_text>\<open>CENELEC_50128\<close>\<close>
subsection\<open>The CENELEC 50128 Example\<close>
text\<open>
The ontology \<^verbatim>\<open>CENELEC_50128\<close>\index{ontology!CENELEC\_50128} is a small ontology modeling
documents for a certification following CENELEC 50128~@{cite "boulanger:cenelec-50128:2015"}.
The \<^isadof> distribution contains a small example using the ontology ``CENELEC\_50128'' in
the directory \nolinkurl{examples/CENELEC_50128/mini_odo/}. You can inspect/edit the
integrated source example by either
\<^item> starting Isabelle/jEdit using your graphical user interface (\<^eg>, by clicking on the
Isabelle-Icon provided by the Isabelle installation) and loading the file
\nolinkurl{examples/CENELEC_50128/mini_odo/mini_odo.thy}.
\<^item> starting Isabelle/jEdit from the command line by calling:
@{boxed_bash [display]\<open>ë\prompt{\isadofdirn}ë
isabelle jedit examples/CENELEC_50128/mini_odo/mini_odo.thy \<close>}
\<close>
text\<open>\<^noindent> Finally, you
\<^item> can build the \<^pdf>-document by calling:
@{boxed_bash [display]\<open>ë\prompt{\isadofdirn}ë isabelle build mini_odo \<close>}
\<close>
subsection\<open>Modeling CENELEC 50128\<close>
text\<open>
Documents to be provided in formal certifications (such as CENELEC
50128~@{cite "boulanger:cenelec-50128:2015"} or Common Criteria~@{cite "cc:cc-part3:2006"}) can
much profit from the control of ontological consistency: a substantial amount of the work
of evaluators in formal certification processes consists in tracing down the links from
requirements over assumptions down to elements of evidence, be it in form of semi-formal
documentation, models, code, or tests. In a certification process, traceability becomes a major
concern, and providing mechanisms to ensure complete traceability already during the development of
the integrated source can, in our view, increase the speed and reduce the risk of certification
processes. Making the link-structure machine-checkable, be it between requirements, assumptions,
their implementation and their discharge by evidence (be it tests, proofs, or authoritative
arguments), has the potential in our view to decrease the cost of software developments
targeting certifications.
As in many other cases, formal certification documents come with their own terminology and pragmatics
of what has to be demonstrated and where, and how the traceability of requirements through
design-models over code to system environment assumptions has to be assured.
In the sequel, we present a simplified version of an ontological model used in a
case-study~@{cite "bezzecchi.ea:making:2018"}. We start with an introduction of the concept of
requirement:
@{boxed_theory_text [display]\<open>
doc_class requirement = long_name :: "string option"
doc_class hypothesis = requirement +
hyp_type :: hyp_type <= physical (* default *)
datatype ass_kind = informal | semiformal | formal
doc_class assumption = requirement +
assumption_kind :: ass_kind <= informal
\<close>}
Such ontologies can be enriched by larger explanations and examples, which may help
the team of engineers substantially when developing the central document for a certification,
like an explication of what is precisely the difference between an \<^typ>\<open>hypothesis\<close> and an
\<^typ>\<open>assumption\<close> in the context of the evaluation standard. Since the PIDE makes the definition
of each document class available by a simple mouse-click, this kind of meta-knowledge
can be made far more accessible during document evolution.
For example, the term of category \<^typ>\<open>assumption\<close> is used for domain-specific assumptions.
It has \<^const>\<open>formal\<close>, \<^const>\<open>semiformal\<close> and \<^const>\<open>informal\<close> sub-categories. They have to be
tracked and discharged by appropriate validation procedures within a
certification process, be it by test or proof. It is different from a \<^typ>\<open>hypothesis\<close>, which is
globally assumed and accepted.
In the sequel, the category \<^typ>\<open>exported_constraint\<close> (or \<^typ>\<open>EC\<close> for short)
is used for formal assumptions that arise during the analysis,
design or implementation and have to be tracked till the final
evaluation target, and discharged by appropriate validation procedures
within the certification process, be it by test or proof. A particular class of interest
is the category \<^typ>\<open>safety_related_application_condition\<close> (or \<^typ>\<open>SRAC\<close>
for short), which is used for \<^typ>\<open>EC\<close>'s that establish safety properties
of the evaluation target. Their traceability throughout the certification
is therefore particularly critical. This is naturally modeled as follows:
@{boxed_theory_text [display]\<open>
doc_class EC = assumption +
assumption_kind :: ass_kind <= (*default *) formal
doc_class SRAC = EC +
assumption_kind :: ass_kind <= (*default *) formal
\<close>}
We now can, \<^eg>, write
@{boxed_theory_text [display]\<open>
text*[ass123::SRAC]\<open>
The overall sampling frequency of the odometer subsystem is therefore
14 kHz, which includes sampling, computing and result communication
times \ldots
\<close>
\<close>}
This will be shown in the \<^pdf> as follows:
\<close>
text*[ass123::SRAC] \<open> The overall sampling frequency of the odometer
subsystem is therefore 14 kHz, which includes sampling, computing and
result communication times \ldots \<close>
text\<open>Note that this \<^pdf>-output is the result of a specific setup for \<^typ>\<open>SRAC\<close>s.\<close>
subsection*[ontopide::technical]\<open>Editing Support for CENELEC 50128\<close>
figure*[figfig3::figure,relative_width="95",file_src="''figures/antiquotations-PIDE.png''"]
\<open> Standard antiquotations referring to theory elements.\<close>
text\<open> The corresponding view in @{docitem \<open>figfig3\<close>} shows the core part of a document
conforming to the \<^verbatim>\<open>CENELEC_50128\<close> ontology. The first sample shows standard Isabelle antiquotations
@{cite "wenzel:isabelle-isar:2020"} referring to formal entities of a theory. This way, the informal parts
of a document get ``formal content'' and become more robust under change.\<close>
figure*[figfig5::figure, relative_width="95", file_src="''figures/srac-definition.png''"]
\<open> Defining a \<^typ>\<open>SRAC\<close> in the integrated source ... \<close>
figure*[figfig7::figure, relative_width="95", file_src="''figures/srac-as-es-application.png''"]
\<open> Using a \<^typ>\<open>SRAC\<close> as \<^typ>\<open>EC\<close> document element. \<close>
text\<open> The subsequent sample in @{figure \<open>figfig5\<close>} shows the definition of a
\<^emph>\<open>safety-related application condition\<close>, a side-condition of a theorem which
has the consequence that a certain calculation must be executed sufficiently fast on an embedded
device. This condition cannot be established inside the formal theory but has to be
checked by system integration tests. In @{figure \<open>figfig7\<close>} we then reference this
safety-related condition; however, this happens in a context where general \<^emph>\<open>exported constraints\<close>
are listed. \<^isadof> checks and establishes that this is legal in the given ontology.
\<close>
text\<open>
\<^item> \<^theory_text>\<open>@{term_ \<open>term\<close> }\<close> parses and type-checks \<open>term\<close> with term antiquotations,
for instance \<^theory_text>\<open>@{term_ \<open>@{cenelec-term \<open>FT\<close>}\<close>}\<close> will parse and check
that \<open>FT\<close> is indeed an instance of the class \<^typ>\<open>cenelec_term\<close>.
\<close>
subsection\<open>A Domain-Specific Ontology: \<^verbatim>\<open>CENELEC_50128\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => true (* String.isPrefix "technical_report" l
orelse String.isPrefix "Isa_COL" l *))
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>CENELEC_50128\<close> ontology in \<^theory>\<open>Isabelle_DOF-Ontologies.CENELEC_50128\<close>
is an example of a domain-specific ontology.
It is based on \<^verbatim>\<open>technical_report\<close>, since we assume that this kind of format will be most
appropriate for this type of long and tedious document:
%
\begin{center}
\begin{minipage}{.9\textwidth}\footnotesize
\dirtree{%
.0 .
.1 CENELEC\_50128.judgement\DTcomment{...}.
.1 CENELEC\_50128.test\_item\DTcomment{...}.
.2 CENELEC\_50128.test\_case\DTcomment{...}.
.2 CENELEC\_50128.test\_tool\DTcomment{...}.
.2 CENELEC\_50128.test\_result\DTcomment{...}.
.2 CENELEC\_50128.test\_adm\_role\DTcomment{...}.
.2 CENELEC\_50128.test\_environment\DTcomment{...}.
.2 CENELEC\_50128.test\_requirement\DTcomment{...}.
.2 CENELEC\_50128.test\_specification\DTcomment{...}.
.1 CENELEC\_50128.objectives\DTcomment{...}.
.1 CENELEC\_50128.design\_item\DTcomment{...}.
.2 CENELEC\_50128.interface\DTcomment{...}.
.1 CENELEC\_50128.sub\_requirement\DTcomment{...}.
.1 CENELEC\_50128.test\_documentation\DTcomment{...}.
.1 Isa\_COL.text\_element\DTcomment{...}.
.2 CENELEC\_50128.requirement\DTcomment{...}.
.3 CENELEC\_50128.TC\DTcomment{...}.
.3 CENELEC\_50128.FnI\DTcomment{...}.
.3 CENELEC\_50128.SIR\DTcomment{...}.
.3 CENELEC\_50128.CoAS\DTcomment{...}.
.3 CENELEC\_50128.HtbC\DTcomment{...}.
.3 CENELEC\_50128.SILA\DTcomment{...}.
.3 CENELEC\_50128.assumption\DTcomment{...}.
.4 CENELEC\_50128.AC\DTcomment{...}.
.5 CENELEC\_50128.EC\DTcomment{...}.
.6 CENELEC\_50128.SRAC\DTcomment{...}.
.3 CENELEC\_50128.hypothesis\DTcomment{...}.
.4 CENELEC\_50128.security\_hyp\DTcomment{...}.
.3 CENELEC\_50128.safety\_requirement\DTcomment{...}.
.2 CENELEC\_50128.cenelec\_text\DTcomment{...}.
.3 CENELEC\_50128.SWAS\DTcomment{...}.
.3 [...].
.2 scholarly\_paper.text\_section\DTcomment{...}.
.3 scholarly\_paper.technical\DTcomment{...}.
.4 scholarly\_paper.math\_content\DTcomment{...}.
.5 CENELEC\_50128.semi\_formal\_content\DTcomment{...}.
.1 ...
}
\end{minipage}
\end{center}
\<close>
(* TODO : Rearrange ontology hierarchies. *)
subsubsection\<open>Examples\<close>
text\<open>
The category ``exported constraint (EC)'' is defined in the file
\<^file>\<open>CENELEC_50128.thy\<close> as follows:
@{boxed_theory_text [display]\<open>
doc_class requirement = text_element +
long_name :: "string option"
is_concerned :: "role set"
doc_class assumption = requirement +
assumption_kind :: ass_kind <= informal
doc_class AC = assumption +
is_concerned :: "role set" <= "UNIV"
doc_class EC = AC +
assumption_kind :: ass_kind <= (*default *) formal
\<close>}
\<close>
text\<open>
We now define the document representations, in the file
\<^file>\<open>DOF-CENELEC_50128.sty\<close>. Let us assume that we want to
register the definition of EC's in a dedicated table of contents (\<^boxed_latex>\<open>tos\<close>)
and use an earlier defined environment \inlineltx|\begin{EC}...\end{EC}| for their graphical
representation. Note that the \inlineltx|\newisadof{}[]{}|-command requires the
full-qualified names, \<^eg>, \<^boxed_theory_text>\<open>text.CENELEC_50128.EC\<close> for the document class and
\<^boxed_theory_text>\<open>CENELEC_50128.requirement.long_name\<close> for the attribute \<^const>\<open>long_name\<close>,
inherited from the document class \<^typ>\<open>requirement\<close>. The representation of \<^typ>\<open>EC\<close>'s
can now be defined as follows:
% TODO:
% Explain the text qualifier of the long_name text.CENELEC_50128.EC
\begin{ltx}
\newisadof{text.CENELEC_50128.EC}%
[label=,type=%
,Isa_COL.text_element.level=%
,Isa_COL.text_element.referentiable=%
,Isa_COL.text_element.variants=%
,CENELEC_50128.requirement.is_concerned=%
,CENELEC_50128.requirement.long_name=%
,CENELEC_50128.EC.assumption_kind=][1]{%
\begin{isamarkuptext}%
\ifthenelse{\equal{\commandkey{CENELEC_50128.requirement.long_name}}{}}{%
% If long_name is not defined, we only create an entry in the table tos
% using the auto-generated number of the EC
\begin{EC}%
\addxcontentsline{tos}{chapter}[]{\autoref{\commandkey{label}}}%
}{%
% If long_name is defined, we use the long_name as title in the
% layout of the EC, in the table "tos" and as index entry.
\begin{EC}[\commandkey{CENELEC_50128.requirement.long_name}]%
\addxcontentsline{tos}{chapter}[]{\autoref{\commandkey{label}}: %
\commandkey{CENELEC_50128.requirement.long_name}}%
\DOFindex{EC}{\commandkey{CENELEC_50128.requirement.long_name}}%
}%
\label{\commandkey{label}}% we use the label attribute as anchor
#1% The main text of the EC
\end{EC}
\end{isamarkuptext}%
}
\end{ltx}
\<close>
text\<open>
For example, the @{docitem "ass123"} is mapped to
@{boxed_latex [display]
\<open>\begin{isamarkuptext*}%
[label = {ass123},type = {CENELEC_50128.SRAC},
args={label = {ass123}, type = {CENELEC_50128.SRAC},
CENELEC_50128.EC.assumption_kind = {formal}}
] The overall sampling frequency of the odometer subsystem is therefore
14 kHz, which includes sampling, computing and result communication
times ...
\end{isamarkuptext*}\<close>}
This environment is mapped to a plain \<^LaTeX> command via:
@{boxed_latex [display]
\<open> \NewEnviron{isamarkuptext*}[1][]{\isaDof[env={text},#1]{\BODY}} \<close>}
\<close>
text\<open>
For the command-based setup, \<^isadof> provides a dispatcher that selects the most specific
implementation for a given \<^boxed_theory_text>\<open>doc_class\<close>:
@{boxed_latex [display]
\<open>%% The Isabelle/DOF dispatcher:
\newkeycommand+[\|]\isaDof[env={UNKNOWN},label=,type={dummyT},args={}][1]{%
\ifcsname isaDof.\commandkey{type}\endcsname%
\csname isaDof.\commandkey{type}\endcsname%
[label=\commandkey{label},\commandkey{args}]{#1}%
\else\relax\fi%
\ifcsname isaDof.\commandkey{env}.\commandkey{type}\endcsname%
\csname isaDof.\commandkey{env}.\commandkey{type}\endcsname%
[label=\commandkey{label},\commandkey{args}]{#1}%
\else%
\message{Isabelle/DOF: Using default LaTeX representation for concept %
"\commandkey{env}.\commandkey{type}".}%
\ifcsname isaDof.\commandkey{env}\endcsname%
\csname isaDof.\commandkey{env}\endcsname%
[label=\commandkey{label}]{#1}%
\else%
\errmessage{Isabelle/DOF: No LaTeX representation for concept %
"\commandkey{env}.\commandkey{type}" defined and no default %
definition for "\commandkey{env}" available either.}%
\fi%
\fi%
}\<close>}
\<close>
(*<<*)
end
(*>>*)

Some files were not shown because too many files have changed in this diff