Achim D. Brucker 2021-01-09 06:32:17 +00:00
commit 6927781d26
18 changed files with 1328 additions and 1076 deletions


@ -534,14 +534,14 @@ text\<open>
text\<open>Examples for declaration of typed doc-items "assumption" and "hypothesis",
concepts defined in the underlying ontology @{theory "Isabelle_DOF.CENELEC_50128"}. \<close>
text*[ass2::assumption, long_name="Some ''assumption one''"] \<open> The subsystem Y is safe. \<close>
text*[hyp1::hypothesis] \<open> P not equal NP \<close>

text\<open>A real example fragment from a larger project, declaring a text-element as a
"safety-related application condition", a concept defined in the
@{theory "Isabelle_DOF.CENELEC_50128"} ontology:\<close>

text*[hyp2::hypothesis]\<open>Under the assumption @{assumption \<open>ass2\<close>} we establish the following: ... \<close>

text*[ass122::SRAC, long_name="Some ''ass122''"] \<open> The overall sampling frequency of the odometer
subsystem is therefore 14 kHz, which includes sampling, computing and


@ -19,17 +19,14 @@ begin
open_monitor*[this::article]
declare[[strict_monitor_checking=false]]

define_shortcut* isadof   \<rightleftharpoons> \<open>\isadof\<close>
                 LaTeX    \<rightleftharpoons> \<open>\LaTeX{}\<close>
                 dots     \<rightleftharpoons> \<open>\ldots\<close>
                 isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
                 Protege  \<rightleftharpoons> \<open>Prot{\'e}g{\'e}\<close>

(* slanted text in contrast to italics *)
define_macro* slanted_text \<rightleftharpoons> \<open>\textsl{\<close> _ \<open>}\<close>
(*>*)


@ -1,27 +1,30 @@
(*<*)
theory "paper"
  imports "Isabelle_DOF.scholarly_paper"
begin

open_monitor*[this::article]

declare[[ strict_monitor_checking  = false]]
declare[[ Definition_default_class = "definition"]]
declare[[ Lemma_default_class      = "lemma"]]
declare[[ Theorem_default_class    = "theorem"]]

define_shortcut* csp      \<rightleftharpoons> \<open>CSP\<close>
                 holcsp   \<rightleftharpoons> \<open>HOL-CSP\<close>
                 isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
(*>*)

title*[tit::title]\<open>Philosophers may Dine - Definitively!\<close>

author*[safouan,email="\<open>safouan.taha@lri.fr\<close>",affiliation="\<open>LRI, CentraleSupelec\<close>"]\<open>Safouan Taha\<close>
author*[bu,email= "\<open>wolff@lri.fr\<close>",affiliation = "\<open>LRI, Université Paris-Saclay\<close>"]\<open>Burkhart Wolff\<close>
author*[lina,email="\<open>lina.ye@lri.fr\<close>",affiliation="\<open>LRI, Inria, LSV, CentraleSupelec\<close>"]\<open>Lina Ye\<close>

abstract*[abs, keywordlist="[\<open>Shallow Embedding\<close>,\<open>Process-Algebra\<close>,
                             \<open>Concurrency\<close>,\<open>Computational Models\<close>]"]
\<open> The theory of Communicating Sequential Processes going back to Hoare and Roscoe is still today
one of the reference theories for concurrent specification and computing. In 1997, a first
formalization in \<^isabelle> of the denotational semantics of the Failure/Divergence Model of
@ -60,8 +63,8 @@ systems, such as the T9000 transansputer @{cite "Barret95"}.
The theory of \<^csp> was first described in 1978 in a book by Tony Hoare @{cite "Hoare:1985:CSP:3921"},
but has since evolved substantially @{cite "BrookesHR84" and "brookes-roscoe85" and "roscoe:csp:1998"}.
\<^csp> describes the most common communication and synchronization mechanisms
with one single language primitive: synchronous communication written \<open>_\<lbrakk>_\<rbrakk>_\<close>. \<^csp> semantics is
described by a fully abstract model of behaviour designed to be \<^emph>\<open>compositional\<close>: the denotational
semantics of a process \<open>P\<close> encompasses all possible behaviours of this process in the context of all
possible environments \<open>P \<lbrakk>S\<rbrakk> Env\<close> (where \<open>S\<close> is the set of \<open>atomic events\<close> both \<open>P\<close> and \<open>Env\<close> must
synchronize). This design objective has the consequence that two kinds of choice have to
@ -156,7 +159,7 @@ Let two processes be defined as follows:
\<^enum> \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t = (a \<rightarrow> Stop) \<sqinter> (b \<rightarrow> Stop)\<close>
\<close>

text\<open>These two processes \<open>P\<^sub>d\<^sub>e\<^sub>t\<close> and \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t\<close> cannot be distinguished by using
the trace semantics: \<open>\<T>(P\<^sub>d\<^sub>e\<^sub>t) = \<T>(P\<^sub>n\<^sub>d\<^sub>e\<^sub>t) = {[],[a],[b]}\<close>. To resolve this problem, Brookes @{cite "BrookesHR84"}
proposed the failures model, where communication traces were augmented with the
constraint information for further communication that is represented negatively as a refusal set.
@ -181,7 +184,7 @@ many times. However, using the \<^csp> hiding operator \<open>_\_\<close>, this
\<close>

text\<open>where \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> will be equivalent to \<open>\<bottom>\<close> in the process cpo ordering.
To distinguish divergences from the deadlock process, Brookes and Roscoe
proposed the failure/divergence model to incorporate divergence traces @{cite "brookes-roscoe85"}.
A divergence trace is one leading to a possibly divergent behavior.
@ -245,7 +248,7 @@ Second, in the traditional literature, the semantic domain is implicitly describ
over the three semantic functions \<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close>.
Informally, these are:
\<^item> the initial trace of a process must be empty;
\<^item> any allowed trace must be \<open>front\<^sub>-tickFree\<close>;
\<^item> traces of a process are \<^emph>\<open>prefix-closed\<close>;
\<^item> a process can refuse all subsets of a refusal set;
@ -256,8 +259,7 @@ Informally, these are:
\<^item> a trace ending with \<open>\<surd>\<close> belonging to the divergence set implies that its
  maximum prefix without \<open>\<surd>\<close> is also a divergent trace.

More formally, a process \<open>P\<close> of the type \<open>\<Sigma> process\<close> should have the following properties:

@{cartouche [display] \<open>([],{}) \<in> \<F> P \<and>
@ -270,9 +272,8 @@ Informally, these are:
(\<forall> s X. s \<in> \<D> P \<longrightarrow> (s,X) \<in> \<F> P) \<and>
(\<forall> s. s@[\<surd>] \<in> \<D> P \<longrightarrow> s \<in> \<D> P)\<close>}

Our objective is to encapsulate this wishlist into a type constructed as a conservative
theory extension in our theory \<^holcsp>.
Therefore third, we define a pre-type for processes \<open>\<Sigma> process\<^sub>0\<close> by \<open> \<P>(\<Sigma>\<^sup>\<surd>\<^sup>* \<times> \<P>(\<Sigma>\<^sup>\<surd>)) \<times> \<P>(\<Sigma>\<^sup>\<surd>)\<close>.
Fourth, we turn our wishlist of "axioms" above into the definition of a predicate \<open>is_process P\<close>
of type \<open>\<Sigma> process\<^sub>0 \<Rightarrow> bool\<close> deciding if its conditions are fulfilled. Since \<open>P\<close> is a pre-process,
@ -281,7 +282,7 @@ And last not least fifth, we use the following type definition:
\<^item> \<^theory_text>\<open>typedef '\<alpha> process = "{P :: '\<alpha> process\<^sub>0 . is_process P}"\<close>

Isabelle requires a proof for the existence of a witness for this set,
but this can be constructed in a straightforward manner. Suitable definitions for
\<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close> lifting \<open>fst\<close> and \<open>snd\<close> on the new \<open>'\<alpha> process\<close>-type allow us to derive
the above properties for any \<open>P::'\<alpha> process\<close>. \<close>
@ -298,11 +299,9 @@ This boils down to a proof that an equivalent definition on the pre-process type
maintains \<open>is_process\<close>, \<^ie> this predicate remains invariant on the elements of the semantic domain.
For example, we define \<open>_\<sqinter>_\<close> on the pre-process type as follows:
\<^item> \<^theory_text>\<open>definition "P \<sqinter> Q \<equiv> Abs_process(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)"\<close>

where \<open>\<F> = fst \<circ> Rep_process\<close> and \<open>\<D> = snd \<circ> Rep_process\<close> and where \<open>Rep_process\<close> and
\<open>Abs_process\<close> are the representation and abstraction morphisms resulting from the
type definition linking \<open>'\<alpha> process\<close> isomorphically to \<open>'\<alpha> process\<^sub>0\<close>. Proving the above properties
for \<open>\<F> (P \<sqinter> Q)\<close> and \<open>\<D> (P \<sqinter> Q)\<close> requires a proof that \<open>(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)\<close>
@ -360,7 +359,7 @@ We define \<open>P \<sqsubseteq> Q \<equiv> \<psi>\<^sub>\<D> \<and> \<psi>\<^su
\<^enum> \<open>\<psi>\<^sub>\<M> = Mins(\<D> P) \<subseteq> \<T> Q \<close>
\<close>

text\<open>The third condition \<open>\<psi>\<^sub>\<M>\<close> implies that the set of minimal divergent traces
(ones with no proper prefix that is also a divergence) in \<open>P\<close>, denoted by \<open>Mins(\<D> P)\<close>,
should be a subset of the trace set of \<open>Q\<close>.
%One may note that each element in \<open>Mins(\<D> P)\<close> do actually not contain the \<open>\<surd>\<close>,
@ -397,7 +396,7 @@ The port of HOL-CSP 2 on HOLCF implied that the derivation of the entire continu
had to be completely re-done (3000 loc).

HOL-CSP provides an important proof principle, the fixed-point induction:
@{cartouche [display, indent=5] \<open>cont f \<Longrightarrow> adm P \<Longrightarrow> P \<bottom> \<Longrightarrow> (\<And>X. P X \<Longrightarrow> P(f X)) \<Longrightarrow> P(\<mu>X. f X)\<close>}
@ -528,34 +527,34 @@ To handle termination better, we added two new processes \<open>CHAOS\<^sub>S\<^
%thus must be without it.
\<close>

(*<*) (* a test ...*)
text*[X22 ::math_content ]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
text*[X32::"definition", mcc=defn]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X42]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X52::"definition"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>

text\<open> The \<open>RUN\<close>-process defined @{math_content X22} represents the process that accepts all
events, but never stops nor deadlocks. The \<open>CHAOS\<close>-process comes in two variants shown in
@{definition X32}, @{definition X42} and @{definition X52}: the process that non-deterministically
stops or accepts any offered event, whereas \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> can additionally terminate.\<close>
(*>*)

Definition*[X2]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
Definition*[X3]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X4]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close>\<close>
Definition*[X5]\<open>\<open>DF A \<equiv> \<mu> X. (\<sqinter> x \<in> A \<rightarrow> X)\<close> \<close>
Definition*[X6]\<open>\<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. ((\<sqinter> x \<in> A \<rightarrow> X) \<sqinter> SKIP)\<close> \<close>

text\<open>In the following, we denote \<open> \<R>\<P> = {DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P, DF, RUN, CHAOS, CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P}\<close>.
All five reference processes are divergence-free.
%which was done by using a particular lemma \<open>\<D> (\<mu> x. f x) = \<Inter>\<^sub>i\<^sub>\<in>\<^sub>\<nat> \<D> (f\<^sup>i \<bottom>)\<close>.
@{cartouche [display,indent=8] \<open> D (\<PP> UNIV) = {} where \<PP> \<in> \<R>\<P> and UNIV is the set of all events\<close>}

Regarding the failure refinement ordering, the set of failures \<open>\<F> P\<close> for any process \<open>P\<close> is
a subset of \<open>\<F> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close>.% and the following lemma was proved:
% This proof is performed by induction, based on the failure projection of \<open>STOP\<close> and that of
% internal choice.
@{cartouche [display, indent=25] \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close>}
@ -615,8 +614,6 @@ be deadlocked after any non-terminating trace.
Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>"]
\<open> \hfill \break \<open>deadlock_free P \<longleftrightarrow> (\<forall>s\<in>\<T> P. tickFree s \<longrightarrow> (s, {\<surd>}\<union>events_of P) \<notin> \<F> P)\<close> \<close>

Definition*[X11]\<open> \<open>livelock\<^sub>-free P \<equiv> \<D> P = {} \<close> \<close>

text\<open> Recall that all five reference processes are livelock-free.
@ -632,12 +629,12 @@ Finally, we proved the following theorem that confirms the relationship between
properties:
\<close>

Theorem*[T2, short_name="''DF implies LF''"]
\<open> \<open>deadlock_free P \<longrightarrow> livelock_free P\<close> \<close>

text\<open>
At first glance this is entirely natural, but it is also surprising, since the proof of
deadlock-freeness only requires failure refinement \<open>\<sqsubseteq>\<^sub>\<F>\<close> (see @{definition \<open>X10\<close>}) where divergence
traces are mixed within the failures set. Note that the existing tools in the literature normally
detect these two phenomena separately, such as FDR, for which checking livelock-freeness is
very costly. In our framework, deadlock-freeness of a given system
implies its livelock-freeness. However, if a system is not deadlock-free,
@ -695,13 +692,13 @@ refinement orderings. We state:
@{theory_text [display,indent=5] \<open>lemma: COPY \<sqsubseteq> SYSTEM\<close>}

and apply fixed-point induction over \<open>COPY\<close>; this leaves us with three subgoals:
\<^enum> \<open>adm (\<lambda>a. a \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN)\<close>
\<^enum> \<open>\<bottom> \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>
\<^enum> @{cartouche [display]\<open>P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN \<Longrightarrow>
                        left?x \<rightarrow> right!x \<rightarrow> P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>}

The first two sub-proofs are automatic simplification proofs; the third requires unfolding
\<open>SEND\<close> and \<open>REC\<close> one step and applying the algebraic laws. No denotational
semantics reasoning is necessary here; it is just an induct-simplify proof consisting
of a 2-line proof script involving the derived algebraic laws of \<^csp>.


@ -16,24 +16,25 @@ theory "00_Frontmatter"
imports "Isabelle_DOF.technical_report" imports "Isabelle_DOF.technical_report"
begin begin
section\<open>Document Local Setup.\<close>
text\<open>Some internal setup, introducing document specific abbreviations and macros.\<close>
setup \<open>DOF_lib.define_shortcut \<^binding>\<open>dof\<close> "\\dof"\<close> section\<open>Local Document Setup.\<close>
setup \<open>DOF_lib.define_shortcut \<^binding>\<open>isadof\<close> "\\isadof"\<close> text\<open>... introducing document specific abbreviations and macros.\<close>
setup \<open> DOF_lib.define_shortcut \<^binding>\<open>TeXLive\<close>"\\TeXLive"
#> DOF_lib.define_shortcut \<^binding>\<open>BibTeX\<close> "\\BibTeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>LaTeX\<close> "\\LaTeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>TeX\<close> "\\TeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>pdf\<close> "PDF"
#> DOF_lib.define_shortcut \<^binding>\<open>pdftex\<close> "\\pdftex{}"
\<close>
text\<open>Note that these setups assume that the associated \<^LaTeX> macros are defined, \<^eg>, define_shortcut* dof \<rightleftharpoons> \<open>\dof\<close>
in the document prelude. \<close> isadof \<rightleftharpoons> \<open>\isadof\<close>
setup\<open> DOF_lib.define_macro \<^binding>\<open>index\<close> "\\index{" "}" (K(K())) (*no checking, no reporting*) define_shortcut* TeXLive \<rightleftharpoons> \<open>\TeXLive\<close>
#> DOF_lib.define_macro \<^binding>\<open>bindex\<close> "\\bindex{" "}"(K(K()))\<close> BibTeX \<rightleftharpoons> \<open>\BibTeX{}\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
TeX \<rightleftharpoons> \<open>\TeX{}\<close>
pdf \<rightleftharpoons> \<open>PDF\<close>
pdftex \<rightleftharpoons> \<open>\pdftex{}\<close>
text\<open>Note that these setups assume that the associated \<^LaTeX> macros
are defined, \<^eg>, in the document prelude. \<close>
define_macro* index \<rightleftharpoons> \<open>\index{\<close> _ \<open>}\<close>
define_macro* bindex \<rightleftharpoons> \<open>\bindex{\<close> _ \<open>}\<close>
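
text\<open>Once defined, such shortcuts and macros are available as antiquotations in running text,
     \<^eg>, \<^isadof>, \<^TeXLive>, or \<^bindex>\<open>ontology\<close> (a usage sketch; the concrete
     expansion depends on the \<^LaTeX> macros provided in the document prelude). \<close>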
ML\<open>


@ -16,6 +16,7 @@ theory
"03_GuidedTour" "03_GuidedTour"
imports imports
"02_Background" "02_Background"
"Isabelle_DOF.technical_report"
"Isabelle_DOF.CENELEC_50128" "Isabelle_DOF.CENELEC_50128"
begin begin
(*>*) (*>*)
@ -424,6 +425,7 @@ doc_class "theorem" = math_content +
mcc :: "math_content_class" <= "thm" ... mcc :: "math_content_class" <= "thm" ...
\<close>}\<close> \<close>}\<close>
text\<open>The class \<^verbatim>\<open>technical\<close> regroups a number of text-elements that contain typical text\<open>The class \<^verbatim>\<open>technical\<close> regroups a number of text-elements that contain typical
"technical content" in mathematical or engineering papers: code, definitions, theorems, "technical content" in mathematical or engineering papers: code, definitions, theorems,
lemmas, examples. From this class, the more stricter class of @{typ \<open>math_content\<close>} is derived, lemmas, examples. From this class, the more stricter class of @{typ \<open>math_content\<close>} is derived,


@ -16,144 +16,84 @@ theory
"04_RefMan" "04_RefMan"
imports imports
"03_GuidedTour" "03_GuidedTour"
"Isabelle_DOF.Isa_COL" "Isabelle_DOF.technical_report"
begin begin
declare_reference*[infrastructure::technical]
(*>*) (*>*)
chapter*[isadof_ontologies::technical]\<open>Developing Ontologies\<close> chapter*[isadof_ontologies::technical]\<open>Ontologies and their Development\<close>
text\<open> text\<open>
In this chapter, we explain the concepts for modeling new ontologies, developing a document In this chapter, we explain the concepts of \<^isadof> in a more systematic way, and give
representation for them, as well as developing new document templates. guidelines for modeling new ontologies, present underlying concepts for a mapping to a
\<close> representation, and give hints for the development of new document templates.
section*[infrastructure::technical]\<open>Overview and Technical Infrastructure\<close>
text\<open>
\<^isadof> is embedded in the underlying generic document model of Isabelle as described in \<^isadof> is embedded in the underlying generic document model of Isabelle as described in
\<^introduction>\<open>dof\<close>. Recall that the document language can be extended dynamically, \<^ie>, new \<^introduction>\<open>dof\<close>. Recall that the document language can be extended dynamically, \<^ie>, new
\<open>user-defined\<close> can be introduced at run-time. This is similar to the definition of new functions \<open>user-defined\<close> can be introduced at run-time. This is similar to the definition of new functions
in an interpreter. \<^isadof> as a system plugin is is a number of new command definitions in in an interpreter. \<^isadof> as a system plugin provides a number of new command definitions in
Isabelle's document model. Isabelle's document model.
\<^isadof> consists consists basically of four components: \<^isadof> consists consists basically of five components:
\<^item> an own \<^emph>\<open>family of text-elements\<close> such as \<^boxed_theory_text>\<open>title*\<close>, \<^boxed_theory_text>\<open>chapter*\<close> \<^item> the \<^emph>\<open>DOF-core\<close> providing the \<^emph>\<open>ontology definition language\<close> (called ODL)
\<^boxed_theory_text>\<open>text*\<close>, etc., which can be annotated with meta-information defined in the which allow for the definitions of document-classes and necessary auxiliary datatypes,
underlying ontology definition and allow to build a \<^emph>\<open>core\<close> document, \<^item> the \<^emph>\<open>DOF-core\<close> also provides an own \<^emph>\<open>family of commands\<close> such as
\<^item> the \<^emph>\<open>ontology definition language\<close> (called ODL) which allow for the definitions \<^boxed_theory_text>\<open>text*\<close>, \<^boxed_theory_text>\<open>declare_reference*\<close>, \<^etc>.;
of document-classes and necessary auxiliary datatypes, They allow for the annotation of text-elements with meta-information defined in ODL,
\<^item> the \<^isadof> library of ontologies providing ontological concepts as well
as supporting infrastructure,
\<^item> an infrastructure for ontology-specific \<^emph>\<open>layout definitions\<close>, exploiting this meta-information, \<^item> an infrastructure for ontology-specific \<^emph>\<open>layout definitions\<close>, exploiting this meta-information,
and and
\<^item> an infrastructure for generic \<^emph>\<open>layout definitions\<close> for documents following, \<^eg>, the format \<^item> an infrastructure for generic \<^emph>\<open>layout definitions\<close> for documents following, \<^eg>, the format
guidelines of publishers or standardization bodies. guidelines of publishers or standardization bodies.
\<close> \<close>
text\<open>
The list of fully supported (\<^ie>, supporting both interactive ontological modeling and
document generation) ontologies and the list of supported document templates can be
obtained by calling \inlinebash|isabelle mkroot_DOF -h| (see \<^technical>\<open>first_project\<close>).
Note that the postfix \inlinebash|-UNSUPPORTED| denotes experimental ontologies or templates
for which further manual setup steps might be required or that are not fully tested. Also note
that the \<^LaTeX>-class files required by the templates need to be already installed on your
system. This is mostly a problem for publisher specific templates (\<^eg>, Springer's
\<^path>\<open>llncs.cls\<close>), which cannot be re-distributed due to copyright restrictions.
\<close>
subsection\<open>Ontologies\<close>
text\<open>
Similarly to Isabelle, which is based on a core logic \<^theory>\<open>Pure\<close> and then extended by libraries
to major systems like \<^verbatim>\<open>HOL\<close> or \<^verbatim>\<open>FOL\<close>, \<^isadof> has a generic core infrastructure \<^dof> and then
presents itself to users via major library extensions, which add domain-specific
system-extensions. Consequently, ontologies in \<^isadof> are not just a sequence of descriptions in
\<^isadof>'s Ontology Definition Language (ODL). Rather, they are themselves presented as integrated
sources that provide textual descriptions, abbreviations, macro-support and even ML-code.
Conceptually, the library of \<^isadof> is currently organized as follows
\<^footnote>\<open>Note that the \<^emph>\<open>technical\<close> organisation is slightly different and shown in
@{technical (unchecked) \<open>infrastructure\<close>}.\<close>:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 COL\DTcomment{The Common Ontology Library}.
.2 scholarly\_paper\DTcomment{Scientific Papers}.
.3 technical\_report\DTcomment{Extended Papers}.
.4 CENELEC\_50128\DTcomment{Papers according to CENELEC\_50128}.
.4 CC\_v3\_1\_R5\DTcomment{Papers according to Common Criteria}.
.4 \ldots.
}
\end{minipage}
\end{center}
\<close>
text\<open>
Developing a new ontology ``\inlinebash|foo|'' requires, from a technical perspective, the
following steps:
\<^item> create a new sub-directory \inlinebash|foo| in the directory \inlinebash|src/ontologies|
\<^item> definition of the ontological concepts, using \<^isadof>'s Ontology Definition Language (ODL), in
a new theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close>.
\<^item> definition of the document representation for the ontological concepts in a \LaTeX-style
file \<^path>\<open>src/ontologies/foo/DOF-foo.sty\<close>
\<^item> registration (as import) of the new ontology in the file.
\<^path>\<open>src/ontologies/ontologies.thy\<close>.
\<^item> activation of the new document setup by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>
text\<open>
These libraries not only provide ontological concepts, but also syntactic sugar in Isabelle's
command language Isar that is of major importance for users (and may be perceived as key
\<^isadof> features by many authors). In reality,
they are derived concepts from more generic ones; for example, the commands
\<^boxed_theory_text>\<open>title*\<close>, \<^boxed_theory_text>\<open>section*\<close>, \<^boxed_theory_text>\<open>subsection*\<close>, \<^etc>,
are a kind of macro for \<^boxed_theory_text>\<open>text*[<label>::title]...\<close>,
\<^boxed_theory_text>\<open>text*[<label>::section]...\<close>, respectively.
These example commands are defined in the COL.

As mentioned earlier, our ontology framework is currently particularly geared towards
\<^emph>\<open>document\<close> editing, structuring and presentation (future applications might be advanced
"knowledge-based" search procedures as well as tool interaction). For this reason, ontologies
are coupled with \<^emph>\<open>layout definitions\<close> allowing an automatic mapping of an integrated
source into \<^LaTeX> and finally \<^pdf>. The mapping of an ontology to a specific representation
in \<^LaTeX> is steered via associated \<^LaTeX> stylefiles which are included during Isabelle's
document generation process. This mapping is potentially a one-to-many mapping;
this implies a certain technical organisation and some resulting restrictions
described in @{technical (unchecked) \<open>infrastructure\<close>} in more detail.
\<close>
section\<open>The Ontology Definition Language (ODL)\<close>

text\<open>
ODL shares some similarities with meta-modeling languages such as UML class
models: It builds upon concepts like class, inheritance, class-instances, attributes, references
@ -263,7 +203,7 @@ text\<open>
text\<open>
Advanced ontologies can, \<^eg>, use recursive function definitions with
pattern-matching~@{cite "kraus:defining:2020"}, extensible record
specifications~@{cite "wenzel:isabelle-isar:2020"}, and abstract type declarations.
\<close>
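
text\<open>For illustration, a small sketch of such a combination is shown below; all names in it are
invented for this example and are not part of the \<^isadof> distribution:
@{boxed_theory_text [display]\<open>
datatype requirement_level = low | medium | high | composite "requirement_level list"

doc_class hypothetical_requirement =
   level     :: "requirement_level"  <= "low"
   long_name :: "string option"      <= "None"
\<close>}
\<close>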
text\<open>Note that \<^isadof> works internally with fully qualified names in order to avoid confusions
@ -296,7 +236,6 @@ A document class\<^bindex>\<open>document class\<close> can be defined using the
  \<^rail>\<open> 'inv' (name '::')? '"' term '"' \<close>
\<^item> \<open>accepts_clause\<close>:\<^index>\<open>accepts\_clause@\<open>accepts_clause\<close>\<close>
  \<^rail>\<open> 'accepts' '"' regexpr '"'\<close>
\<^item> \<open>rejects_clause\<close>:\<^index>\<open>rejects\_clause@\<open>rejects_clause\<close>\<close>
  \<^rail>\<open> 'rejects' (class_id * ',') \<close>
\<^item> \<open>default_clause\<close>:\<^index>\<open>default\_clause@\<open>default_clause\<close>\<close>
@ -356,43 +295,161 @@ text\<open>
special characters in definitions that need to make use of entries in an aux-file.
\<close>
section\<open>Fundamental Commands of the \<^isadof> Core\<close>

text\<open>Besides the core-commands to define an ontology as presented in the previous section,
the \<^isadof> core provides a number of mechanisms to \<^emph>\<open>use\<close> the resulting data to annotate
text-elements and, in some cases, terms.
\<close>
subsection\<open>Syntax\<close>
text\<open>In the following, we formally introduce the syntax of the core commands as
supported on the Isabelle/Isar level. Note that some more advanced functionality of the Core
is currently only available via the SML APIs of the kernel.
\<^item> \<open>meta_args\<close> :
\<^rail>\<open>obj_id ('::' class_id) ((',' attribute '=' term) *) \<close>
\<^item> \<open>upd_meta_args\<close> :
\<^rail>\<open> (obj_id ('::' class_id) ((',' attribute ('=' | '+=') term) * ))\<close>
\<^item> \<open>annotated_text_element\<close> :
\<^rail>\<open>
( @@{command "text*"}'[' meta_args ']' '\<open>' text '\<close>' |
( @@{command "open_monitor*"}
| @@{command "close_monitor*"}
| @@{command "declare_reference*"}
) '[' meta_args ']'
)
| change_status_command
| inspection_command
| macro_command
\<close>
\<^item> \<^isadof> \<open>change_status_command\<close> :
\<^rail>\<open> (@@{command "update_instance*"} '[' upd_meta_args ']')
| (@@{command "declare_reference*"} (obj_id ('::' class_id)))\<close>
\<^item> \<^isadof> \<open>inspection_command\<close> :
\<^rail>\<open> @@{command "print_doc_classes"}
| @@{command "print_doc_items"}
| @@{command "check_doc_global"}\<close>
\<^item> \<^isadof> \<open>macro_command\<close> :
\<^rail>\<open> @@{command "define_shortcut*"} name ('\<rightleftharpoons>' | '==') '\<open>' string '\<close>'
| @@{command "define_macro*"} name ('\<rightleftharpoons>' | '==')
\<newline> '\<open>' string '\<close>' '_' '\<open>' string '\<close>' \<close>
\<close>
text\<open>Recall that with the exception of \<^theory_text>\<open>text* \<dots> \<close>, all \<^isadof> commands are mapped to visible
layout (such as \<^LaTeX>); these commands have to be wrapped into
\<^verbatim>\<open>(*<*) ... (*>*)\<close> brackets if this is undesired. \<close>
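
text\<open>For instance (a usage sketch; the label is invented for this example), an auxiliary
declaration that should not produce any visible output in the generated document can be
wrapped as follows:
@{boxed_theory_text [display]\<open>
(*<*)
declare_reference*[background::technical]
(*>*)
\<close>}
\<close>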
subsection\<open>Ontologic Text-Elements and their Management\<close>
text\<open> \<^theory_text>\<open>text*[oid::cid, ...] \<open>\<open>\<close> \<dots> text \<dots> \<open>\<close>\<close> \<close> is the core-command of \<^isadof>: it permits the creation of
an object of meta-data belonging to the class \<^theory_text>\<open>cid\<close>. This is viewed as the \<^emph>\<open>definition\<close> of
an instance of a document class. This instance object is attached to the text-element
and thus makes it "trackable" for \<^isadof>, \<^ie>, it can be referenced via the \<^theory_text>\<open>oid\<close>, its attributes
can be set by defaults in the class-definitions, or set at creation time, or modified at any
point after creation via \<^theory_text>\<open>update_instance*[oid, ...]\<close>. The \<^theory_text>\<open>class_id\<close> is syntactically optional;
if omitted, an object belongs to an anonymous superclass of all classes.
The \<^theory_text>\<open>class_id\<close> is used to generate a \<^emph>\<open>class-type\<close> in HOL; note that this may impose lexical
restrictions as well as name-conflicts in the surrounding logical context.
In many cases, it is possible to use the class-type to denote the \<^theory_text>\<open>class_id\<close>; this also
holds for type-synonyms on class-types.

References to text-elements can occur textually before creation; in these cases, they must be
declared via \<^theory_text>\<open>declare_reference*[...]\<close> in order to comply with Isabelle's fundamental
"declaration-before-use" linear-visibility evaluation principle. The forward-declared class-type
must be identical with the defined class-type.

For a declared class \<^theory_text>\<open>cid\<close>, there exists a text antiquotation of the form \<^theory_text>\<open> @{cid \<open>oid\<close>} \<close>.
The precise presentation is decided in the \<^emph>\<open>layout definitions\<close>, for example by suitable
\<^LaTeX>-template code. Declared but not yet defined instances must be referenced with a particular
pragma in order to enforce a relaxed checking \<^theory_text>\<open> @{cid (unchecked) \<open>oid\<close>} \<close>.
% there should also exist a *term* antiquotation ...
\<close>
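
text\<open>A minimal work-flow sketch, using a purely hypothetical class \<^theory_text>\<open>concept\<close> with a
string attribute \<^theory_text>\<open>tag\<close> (neither is part of any ontology shipped with \<^isadof>):
@{boxed_theory_text [display]\<open>
doc_class concept =
   tag :: "string" <= "''unclassified''"

text*[c1::concept, tag="''running example''"]\<open> Some informal text ... \<close>

update_instance*[c1::concept, tag="''revised example''"]

text\<open> As introduced in @{concept \<open>c1\<close>}, ... \<close>
\<close>}
\<close>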
(*<*)
declare_reference*["sec:advanced"::technical]
(*>*)
subsection\<open>Status and Query Commands\<close>
text\<open>\<^isadof> provides a number of inspection commands.
\<^item> \<^theory_text>\<open>print_doc_classes\<close> allows viewing the status of the internal
  class-table resulting from ODL definitions,
\<^item> \<^ML>\<open>DOF_core.print_doc_class_tree\<close> allows for presenting (fragments of)
  class-inheritance trees (currently only available at the ML level),
\<^item> \<^theory_text>\<open>print_doc_items\<close> allows viewing the status of the internal
  object-table of text-elements that were tracked, and
\<^item> \<^theory_text>\<open>check_doc_global\<close> checks whether all declared object references have been
  defined, all monitors are in a final state, and final invariant checks
  on all objects are satisfied (cf. @{technical (unchecked) \<open>sec:advanced\<close>}).
\<close>
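
text\<open>A typical end-of-theory idiom (sketch) combines these checks with closing the monitor that
was opened at the beginning of the document:
@{boxed_theory_text [display]\<open>
close_monitor*[this::article]
check_doc_global
end
\<close>}
\<close>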
subsection\<open>Macros\<close>
text\<open>There is a mechanism to define document-local short-cuts and macros which
are PIDE-supported but lead to an expansion in the integrated source; this feature
can be used to define
\<^item> \<^theory_text>\<open>shortcuts\<close>, \<^ie>, short names that are expanded to, for example,
  \<^LaTeX>-code,
\<^item> \<^theory_text>\<open>macro\<close>'s (= parameterized short-cuts), which allow for
  passing an argument to the expansion mechanism.
\<close>

text\<open>Note that the argument can be checked by a user-supplied SML function with respect to
syntactic as well as semantic criteria; however, the latter feature is currently only accessible
at the SML level and not directly in the Isar language. We would like to stress that this
feature is basically an abstract interface to existing Isabelle functionality in the document
generation.
\<close>
subsubsection\<open>Examples\<close>
text\<open>
\<^item> common short-cut hiding \<^LaTeX> code in the integrated source:
@{theory_text [display] \<open>
define_shortcut* eg \<rightleftharpoons> \<open>\eg\<close> (* Latin: „exempli gratia“ meaning „for example“. *)
clearpage \<rightleftharpoons> \<open>\clearpage{}\<close>
\<close>}
\<^item> non-checking macro:
@{theory_text [display] \<open>
define_macro* index \<rightleftharpoons> \<open>\index{\<close> _ \<open>}\<close>
\<close>}
\<^item> checking macro:
@{theory_text [display] \<open>
setup\<open> DOF_lib.define_macro \<^binding>\<open>vs\<close> "\\vspace{" "}" (check_latex_measure) \<close>
\<close>}
where \<^ML>\<open>check_latex_measure\<close> is a hand-programmed function that checks
the input for syntactical and static semantic constraints.
\<close>
section\<open>The Standard Ontology Libraries\<close>
text\<open> We will describe the backbone of the Standard Library with the
already mentioned hierarchy \<^verbatim>\<open>COL\<close> (the common ontology library),
\<^verbatim>\<open>scholarly_paper\<close> (for MINT-oriented scientific papers),
\<^verbatim>\<open>technical_report\<close> (for MINT-oriented technical reports), and
the example for a domain-specific ontology
\<^verbatim>\<open>CENELEC_50128\<close>.\<close>
subsection\<open>Common Ontology Library (COL)\<close>
(*<*)
ML\<open>writeln (DOF_core.print_doc_class_tree @{context} (fn (n,l) => String.isPrefix "Isa_COL" l) I)\<close>
(*>*)
text\<open>
\<^isadof> provides a Common Ontology Library (COL)\<^index>\<open>Common Ontology Library@see COL\<close>
\<^bindex>\<open>COL\<close> \<^footnote>\<open>contained in \<^theory>\<open>Isabelle_DOF.Isa_COL\<close>\<close>
that introduces several ontology root concepts such as common text-elements and
figures. The overall class-tree it provides looks as follows:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 Isa\_COL.text\_element.
.2 Isa\_COL.chapter.
.2 Isa\_COL.section.
.2 Isa\_COL.subsection.
.2 Isa\_COL.subsubsection.
.1 Isa\_COL.figure.
.2 Isa\_COL.side\_by\_side\_figure.
.1 Isa\_COL.figure\_group.
.1 \ldots.
}
\end{minipage}
\end{center}\<close>
text\<open>
In particular it defines the super-class \<^boxed_theory_text>\<open>text_element\<close>: the root of all
@ -405,7 +462,8 @@ doc_class text_element =
variants :: "String.literal set" <= "{STR ''outline'', STR ''document''}" variants :: "String.literal set" <= "{STR ''outline'', STR ''document''}"
\<close>} \<close>}
Here, \<^boxed_theory_text>\<open>level\<close> defines the section-level (\<^eg>, using a \<^LaTeX>-inspired hierarchy: As mentioned in @{technical \<open>sss\<close>} (without explaining the origin of \<^typ>\<open>text_element\<close>)
, \<^boxed_theory_text>\<open>level\<close> defines the section-level (\<^eg>, using a \<^LaTeX>-inspired hierarchy:
from \<^boxed_theory_text>\<open>Some -1\<close> (corresponding to \inlineltx|\part|) to from \<^boxed_theory_text>\<open>Some -1\<close> (corresponding to \inlineltx|\part|) to
\<^boxed_theory_text>\<open>Some 0\<close> (corresponding to \inlineltx|\chapter|, respectively, \<^boxed_theory_text>\<open>chapter*\<close>) \<^boxed_theory_text>\<open>Some 0\<close> (corresponding to \inlineltx|\chapter|, respectively, \<^boxed_theory_text>\<open>chapter*\<close>)
to \<^boxed_theory_text>\<open>Some 3\<close> (corresponding to \inlineltx|\subsubsection|, respectively, to \<^boxed_theory_text>\<open>Some 3\<close> (corresponding to \inlineltx|\subsubsection|, respectively,
@ -413,18 +471,309 @@ to \<^boxed_theory_text>\<open>Some 3\<close> (corresponding to \inlineltx|\subs
any sequence of technical-elements must be introduced by a text-element with a higher level any sequence of technical-elements must be introduced by a text-element with a higher level
(this would require that technical text section are introduce by a section element). (this would require that technical text section are introduce by a section element).
Similarly, we provide "minimal" instances of the \<^boxed_theory_text>\<open>ASSERTION_ALIKES\<close> The attribute \<^term>\<open>referentiable\<close> captures the information if a text-element can be target
and \<^boxed_theory_text>\<open>FORMAL_STATEMENT_ALIKE\<close> shadow classes: for a reference, which is the case for sections or subsections, for example, but not arbitrary
elements such as, \<^ie>, paragraphs (this mirrors restrictions of the target \<^LaTeX> representation).
The attribute \<^term>\<open>variants\<close> refers to an Isabelle-configuration attribute that permits
to steer the different versions a \<^LaTeX>-presentation of the integrated source.
For further information of the root classes such as \<^typ>\<open>figure\<close>'s, please consult the ontology
\<^theory>\<open>Isabelle_DOF.Isa_COL\<close> directly.
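For instance, a concrete text-element could (hypothetically) override some of these defaults
directly in its meta-arguments; the label \<open>bgr\<close> and the attribute values below are purely
illustrative:
@{boxed_theory_text [display]\<open>
text*[bgr::text_element, level="Some 1", variants="{STR ''document''}"]
\<open>Some background discussion ... \<close>
\<close>}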
COL finally provides macros that extend the command-language of the DOF-core by the following
abbreviations:
\<^item> \<open>derived_text_element\<close> :
\<^rail>\<open>
( ( @@{command "chapter*"}
| @@{command "section*"} | @@{command "subsection*"} | @@{command "subsubsection*"}
| @@{command "paragraph*"} | @@{command "subparagraph*"}
| @@{command "figure*"} | @@{command "side_by_side_figure*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
\<close>
\<close>
text\<open> Note that the command syntax follows the implicit convention to add a "*" to
      the command name in order to distinguish these commands from the standard Isabelle
      text-commands, which are not "ontology-aware" but function similarly otherwise.\<close>
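text\<open>For instance, a figure can be declared in an integrated document as follows (a purely
illustrative fragment; the concrete attribute names of \<^typ>\<open>figure\<close> should be taken from
\<^theory>\<open>Isabelle_DOF.Isa_COL\<close>):
@{boxed_theory_text [display]\<open>
figure*[fig_sample::figure, relative_width="75", src="''figures/sample''"]
       \<open>A sample figure caption.\<close>
\<close>}\<close>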
subsection*["text-elements"::technical]\<open>The Ontology \<^theory>\<open>Isabelle_DOF.scholarly_paper\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => String.isPrefix "scholarly_paper" l
orelse String.isPrefix "Isa_COL" l)
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>scholarly_paper\<close> ontology is oriented towards the classical domains in science:
\<^enum> mathematics
\<^enum> informatics
\<^enum> natural sciences
\<^enum> technology and/or engineering
It extends \<^verbatim>\<open>COL\<close> by the following concepts:
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 scholarly\_paper.title.
.1 scholarly\_paper.subtitle.
.1 scholarly\_paper.author\DTcomment{An Author Entity Declaration}.
.1 scholarly\_paper.abstract.
.1 Isa\_COL.text\_element.
.2 scholarly\_paper.text\_section\DTcomment{Major Paper Text-Elements}.
.3 scholarly\_paper.introduction\DTcomment{...}.
.3 scholarly\_paper.conclusion\DTcomment{...}.
.4 scholarly\_paper.related\_work\DTcomment{...}.
.3 scholarly\_paper.bibliography\DTcomment{...}.
.3 scholarly\_paper.annex\DTcomment{...}.
.3 scholarly\_paper.example\DTcomment{Example in General Sense}.
.3 scholarly\_paper.technical\DTcomment{Root for Technical Content}.
.4 scholarly\_paper.math\_content\DTcomment{...}.
.5 scholarly\_paper.definition\DTcomment{Freeform}.
.5 scholarly\_paper.lemma\DTcomment{Freeform}.
.5 scholarly\_paper.theorem\DTcomment{Freeform}.
.5 scholarly\_paper.corollary\DTcomment{Freeform}.
.5 scholarly\_paper.math\_example\DTcomment{Freeform}.
.5 scholarly\_paper.math\_semiformal\DTcomment{Freeform}.
.5 scholarly\_paper.math\_formal\DTcomment{Formal(=Checked) Content}.
.6 scholarly\_paper.assertion\DTcomment{Assertions}.
.4 scholarly\_paper.tech\_example\DTcomment{...}.
.4 scholarly\_paper.math\_motivation\DTcomment{...}.
.4 scholarly\_paper.math\_explanation\DTcomment{...}.
.4 scholarly\_paper.engineering\_content\DTcomment{...}.
.5 scholarly\_paper.data.
.5 scholarly\_paper.evaluation.
.5 scholarly\_paper.experiment.
.4 ...
.1 ...
.1 scholarly\_paper.article\DTcomment{The Paper Monitor}.
.1 \ldots.
}
\end{minipage}
\end{center}
TODO: There are some slight problems in the hierarchy ...
\<close>
text\<open>A pivotal abstract class in the hierarchy is:
@{boxed_theory_text [display]
\<open>
doc_class text_section = text_element +
main_author :: "author option" <= None
fixme_list :: "string list" <= "[]"
level :: "int option" <= "None"
\<close>}
Besides attributes of more practical concern, such as a fixme-list that can be modified during
the editing process but is only visible in the integrated source and usually ignored in the
generated \<^LaTeX>, this class also introduces the possibility to assign the "ownership" or
"responsibility" for a text-element to a specific author. Note that this is possible since
\<^isadof> assigns to each document class also a class-type which is declared in the HOL
environment.\<close>
(*<*)
declare_reference*["text-elements-expls"::example]
(*>*)
text*[s23::example, main_author = "Some(@{docitem \<open>bu\<close>}::author)"]\<open>
Recall that concrete authors can be denoted by term-antiquotations generated by \<^isadof>; for
example, for a text fragment this may look like
@{boxed_theory_text [display]
\<open>text*[\<dots>::example, main_author = "Some(@{docitem ''bu''}::author)"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}
or
@{boxed_theory_text [display]
\<open>text*[\<dots>::example, main_author = "Some(@{docitem \<open>bu\<close>}::author)"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}
where \<^boxed_theory_text>\<open>"''bu''"\<close> is a string presentation of the reference to the author
text element (see below in @{docitem (unchecked) \<open>text-elements-expls\<close>}).
\<close>
text\<open>Some of these concepts are supported as command-abbreviations, leading to the following
extension of the \<^isadof> command language:
\<^item> \<open>derived_text_elements \<close> :
\<^rail>\<open>
( ( @@{command "author*"}
| @@{command "abstract*"}
| @@{command "Definition*"} | @@{command "Lemma*"} | @@{command "Theorem*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
| @@{command "assert*"} '[' meta_args ']' '\<open>' term '\<close>'
\<close>
\<close>
text\<open>Usually, a command macro for a text element assigns the created instance to the default
class corresponding to that macro. For pragmatic reasons, \<^theory_text>\<open>Definition*\<close>, \<^theory_text>\<open>Lemma*\<close> and
\<^theory_text>\<open>Theorem*\<close> are an exception to this rule and are set up such that the default class is the
super-class @{typ \<open>math_content\<close>} (rather than the class @{typ \<open>definition\<close>}).
This way, it is possible to use these macros for several different flavours of the very generic
concept "definition", which can be used as a freeform mathematical definition but also as a
freeform terminological definition as used in certification standards. Moreover, new subclasses
of @{typ \<open>math_content\<close>} might be introduced in a derived ontology with their own specific
layout definitions.
\<close>
text\<open>While this library is intended to give a lot of space to freeform text elements in
order to counterbalance Isabelle's standard view, it should not be forgotten that the real
strength of Isabelle is its ability to handle both, and to establish links between both worlds.
Therefore, the formal assertion command has been integrated to capture some forms of formal content.\<close>
subsubsection*["text-elements-expls"::example]\<open>Examples\<close>
text\<open>
While the default user interface for class definitions via the
\<^boxed_theory_text>\<open>text*\<open> ... \<close>\<close>-command allows access to all features of the document
class, \<^isadof> provides short-hands for certain, widely-used, concepts such as
\<^boxed_theory_text>\<open>title*\<open> ... \<close>\<close> or \<^boxed_theory_text>\<open>section*\<open> ... \<close>\<close>, \<^eg>:
@{boxed_theory_text [display]\<open>
title*[title::title]\<open>Isabelle/DOF\<close>
subtitle*[subtitle::subtitle]\<open>User and Implementation Manual\<close>
author*[adb::author, email="\<open>a.brucker@exeter.ac.uk\<close>",
        orcid="\<open>0000-0002-6355-1200\<close>", http_site="\<open>https://brucker.ch/\<close>",
        affiliation="\<open>University of Exeter, Exeter, UK\<close>"] \<open>Achim D. Brucker\<close>
author*[bu::author, email = "\<open>wolff@lri.fr\<close>",
        affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
\<close>}
\<close>
text\<open>Assertions allow for logical statements to be checked in the global context.
This is particularly useful to explore formal definitions with respect to their border cases. \<close>
assert*[ass1::assertion, short_name = "\<open>This is an assertion\<close>"] \<open>last [3] < (4::int)\<close>
text\<open>We want to check the consequences of this definition and can add the following statements:
@{boxed_theory_text [display]\<open>
text*[claim::assertion]\<open>For non-empty lists, our definition yields indeed
the last element of a list.\<close>
assert*[claim1::assertion] "last[4::int] = 4"
assert*[claim2::assertion] "last[1,2,3,4::int] = 4"
\<close>}
\<close>
subsubsection\<open>Example: Text Elements with Levels\<close>
text\<open>
As mentioned before, the command macros of \<^theory_text>\<open>Definition*\<close>, \<^theory_text>\<open>Lemma*\<close> and \<^theory_text>\<open>Theorem*\<close>
set the default class to the super-class of @{typ \<open>definition\<close>}.
However, in order to avoid the somewhat tedious consequence:
@{boxed_theory_text [display]
\<open>Theorem*[T1::"theorem", short_name="\<open>DF definition captures deadlock-freeness\<close>"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}
the choice of the default class can be influenced by setting globally an attribute such as
@{boxed_theory_text [display]
\<open>declare[[ Definition_default_class = "definition"]]
declare[[ Theorem_default_class = "theorem"]]
\<close>}
which allows the above example to be shortened to:
@{boxed_theory_text [display]
\<open>Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>
\<close>}
\<close>
subsection\<open>The Ontology \<^theory>\<open>Isabelle_DOF.technical_report\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => true (* String.isPrefix "technical_report" l
orelse String.isPrefix "Isa_COL" l *))
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>technical_report\<close> ontology extends \<^verbatim>\<open>scholarly_paper\<close> by concepts needed
for larger reports in the domain of mathematics and engineering. The concepts are fairly
high-level and arranged at the root-class level:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 technical\_report.front\_matter\DTcomment{...}.
.1 technical\_report.table\_of\_contents\DTcomment{...}.
.1 Isa\_COL.text\_element\DTcomment{...}.
.2 scholarly\_paper.text\_section\DTcomment{...}.
.4 technical\_report.code\DTcomment{...}.
.5 technical\_report.SML\DTcomment{...}.
.5 technical\_report.ISAR\DTcomment{...}.
.5 technical\_report.LATEX\DTcomment{...}.
.1 technical\_report.index\DTcomment{...}.
.1 ...
.1 technical\_report.report\DTcomment{...}.
}
\end{minipage}
\end{center}
\<close>
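text\<open>A document instance based on this ontology typically imports the theory
\<^theory>\<open>Isabelle_DOF.technical_report\<close> and opens the corresponding monitor; a sketch
(with a made-up theory name) looks as follows:
@{boxed_theory_text [display]\<open>
theory MyReport
  imports "Isabelle_DOF.technical_report"
begin
open_monitor*[this::report]
  ...
close_monitor*[this::report]
end
\<close>}\<close>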
subsection\<open>A Domain-Specific Ontology: \<^theory>\<open>Isabelle_DOF.CENELEC_50128\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => true (* String.isPrefix "technical_report" l
orelse String.isPrefix "Isa_COL" l *))
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>CENELEC_50128\<close> ontology is an example of a domain-specific ontology. It
is based on \<^verbatim>\<open>technical_report\<close>, since we assume that this kind of format will be most
appropriate for this type of long and tedious documents:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 CENELEC\_50128.judgement\DTcomment{...}.
.1 CENELEC\_50128.test\_item\DTcomment{...}.
.2 CENELEC\_50128.test\_case\DTcomment{...}.
.2 CENELEC\_50128.test\_tool\DTcomment{...}.
.2 CENELEC\_50128.test\_result\DTcomment{...}.
.2 CENELEC\_50128.test\_adm\_role\DTcomment{...}.
.2 CENELEC\_50128.test\_environment\DTcomment{...}.
.2 CENELEC\_50128.test\_requirement\DTcomment{...}.
.2 CENELEC\_50128.test\_specification\DTcomment{...}.
.1 CENELEC\_50128.objectives\DTcomment{...}.
.1 CENELEC\_50128.design\_item\DTcomment{...}.
.2 CENELEC\_50128.interface\DTcomment{...}.
.1 CENELEC\_50128.sub\_requirement\DTcomment{...}.
.1 CENELEC\_50128.test\_documentation\DTcomment{...}.
.1 Isa\_COL.text\_element\DTcomment{...}.
.2 CENELEC\_50128.requirement\DTcomment{...}.
.3 CENELEC\_50128.AC\DTcomment{...}.
.4 CENELEC\_50128.EC\DTcomment{...}.
.5 CENELEC\_50128.SRAC\DTcomment{...}.
.3 CENELEC\_50128.TC\DTcomment{...}.
.3 CENELEC\_50128.FnI\DTcomment{...}.
.3 CENELEC\_50128.SIR\DTcomment{...}.
.3 CENELEC\_50128.CoAS\DTcomment{...}.
.3 CENELEC\_50128.HtbC\DTcomment{...}.
.3 CENELEC\_50128.SILA\DTcomment{...}.
.3 CENELEC\_50128.assumption\DTcomment{...}.
.3 CENELEC\_50128.hypothesis\DTcomment{...}.
.4 CENELEC\_50128.security\_hyp\DTcomment{...}.
.3 CENELEC\_50128.safety\_requirement\DTcomment{...}.
.2 CENELEC\_50128.cenelec\_text\DTcomment{...}.
.3 CENELEC\_50128.SWAS\DTcomment{...}.
.3 [...].
.2 scholarly\_paper.text\_section\DTcomment{...}.
.3 scholarly\_paper.technical\DTcomment{...}.
.4 scholarly\_paper.math\_content\DTcomment{...}.
.5 CENELEC\_50128.semi\_formal\_content\DTcomment{...}.
.1 ...
}
\end{minipage}
\end{center}
\<close>
(* TODO : Rearrange ontology hierarchies. *)
subsubsection\<open>Examples\<close>
text\<open>
The category ``exported constraint (EC)'' is, in the file
\<^file>\<open>../../../src/ontologies/CENELEC_50128/CENELEC_50128.thy\<close>, defined as follows:
@ -480,85 +829,9 @@ can now be defined as follows:
\end{ltx}
\<close>
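text\<open>In an integrated document building on this ontology, an instance of this category could
then be declared along the following lines (a hypothetical instance; the text is made up, and
the attribute \<open>long_name\<close> stems from the underlying requirement class):
@{boxed_theory_text [display]\<open>
text*[ec1::EC, long_name="Some ''EC 1''"]
\<open>The overall sampling frequency of the odometric subsystem shall not be decreased
 without a new safety analysis ... \<close>
\<close>}\<close>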
subsubsection\<open>Example: Assertions\<close>
text\<open>Assertions are a common feature to validate properties of models, presented as a collection
of Isabelle/HOL definitions. They are particularly relevant for highlighting corner cases of a
formal model. For example, assume a definition: \<close>
definition last :: "'a list \<Rightarrow> 'a" where "last S = hd(rev S)"
(* Old stuff using abstract classes.
(*<*)
text*[claim::assertions]\<open>For non-empty lists, our definition yields indeed the last element of a list.\<close>
assert*[claim::assertions] "last[4::int] = 4"
assert*[claim::assertions] "last[1,2,3,4::int] = 4"
(*>*)
*)
text\<open>We want to check the consequences of this definition and can add the following statements:
@{boxed_theory_text [display]\<open>
text*[claim::assertions]\<open>For non-empty lists, our definition yields indeed
the last element of a list.\<close>
assert*[claim1::assertions] "last[4::int] = 4"
assert*[claim2::assertions] "last[1,2,3,4::int] = 4"
\<close>}
\<close>
text\<open>As an \<^boxed_theory_text>\<open>ASSERTION_ALIKES\<close>, the \<^boxed_theory_text>\<open>assertions\<close> class possesses a
\<^boxed_theory_text>\<open>properties\<close> attribute. The \<^boxed_theory_text>\<open>assert*\<close> command evaluates its argument;
in case it evaluates to true the property is added to the property list of the \<^boxed_theory_text>\<open>claim\<close>
text-element. Commands like \<^boxed_theory_text>\<open>Definition*\<close> or \<^boxed_theory_text>\<open>Theorem*\<close> work analogously.\<close>
subsection*["text-elements"::technical]\<open>Annotatable Top-level Text-Elements\<close> subsubsection\<open>For Isabelle Hackers: Defining New Top-Level Commands\<close>
text\<open>
While the default user interface for class definitions via the
\<^boxed_theory_text>\<open>text*\<open> ... \<close>\<close>-command allows access to all features of the document
class, \<^isadof> provides short-hands for certain, widely-used, concepts such as
\<^boxed_theory_text>\<open>title*\<open> ... \<close>\<close> or \<^boxed_theory_text>\<open>section*\<open> ... \<close>\<close>, \<^eg>:
@{boxed_theory_text [display]\<open>
title*[title::title]\<open>Isabelle/DOF\<close>
subtitle*[subtitle::subtitle]\<open>User and Implementation Manual\<close>
text*[adb:: author, email="\<open>a.brucker@exeter.ac.uk\<close>",
orcid="\<open>0000-0002-6355-1200\<close>", http_site="\<open>https://brucker.ch/\<close>",
affiliation="\<open>University of Exeter, Exeter, UK\<close>"] \<open>Achim D. Brucker\<close>
text*[bu::author, email = "\<open>wolff@lri.fr\<close>",
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
\<close>}
In general, all standard text-elements from the Isabelle document model such
as \<^theory_text>\<open>chapter\<close>, \<^theory_text>\<open>section\<close>, \<^theory_text>\<open>text\<close>, have in the \<^isadof>
implementation their counterparts in the family of text-elements that are ontology-aware,
\<^ie>, they dispose of a meta-argument list that allows one to declare that a text-element
has an identity as a text-object labelled \<open>obj_id\<close>, belongs to a document class
\<open>class_id\<close> that has been defined earlier, and has its class-attributes set with particular
values (which are denotable in Isabelle/HOL mathematical term syntax).
\<^item> \<open>meta_args\<close> :
\<^rail>\<open>(obj_id ('::' class_id) ((attribute '=' term)) * ',')\<close>
\<^item> \<open>rich_meta_args\<close> :
\<^rail>\<open> (obj_id ('::' class_id) ((attribute (('=' | '+=') term)) * ','))\<close>
\<^clearpage>
\<^item> \<open>annotated_text_element\<close> :
\<^rail>\<open>
( ( @@{command "title*"}
| @@{command "subtitle*"}
| @@{command "chapter*"}
| @@{command "section*"} | @@{command "subsection*"}
| @@{command "subsubsection*"} | @@{command "paragraph*"} | @@{command "subparagraph*"}
| @@{command "text*"} | @@{command "figure*"} | @@{command "side_by_side_figure*"}
| @@{command "open_monitor*"} | @@{command "close_monitor*"}
| @@{command "Definition*"} | @@{command "Lemma*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
| change_status_command
| inspection_command
\<close>
\<close>
subsubsection\<open>Experts: Defining New Top-Level Commands\<close>
text\<open>
Defining such new top-level commands requires some Isabelle knowledge as well as
@ -619,21 +892,10 @@ schemata:
\end{ltx}
\<close>
subsection*["inspections-commands"::technical]\<open>Status and Inspection Commands\<close>
text\<open>
\<^item> \<^isadof> \<open>change_status_command\<close> :
\<^rail>\<open> (@@{command "update_instance*"} '[' rich_meta_args ']')
| (@@{command "declare_reference*"} (obj_id ('::' class_id)))\<close>
\<^item> \<^isadof> \<open>inspection_command\<close> :
\<^rail>\<open> @@{command "print_doc_classes"}
| @@{command "print_doc_items"}
| @@{command "check_doc_global"}\<close>
\<close>
section*["sec:advanced"::technical]\<open>Advanced ODL Concepts\<close>
subsection*["sec:advanced"::technical]\<open>Advanced ODL Concepts\<close> subsection\<open>Meta-types as Types\<close>
subsubsection\<open>Meta-types as Types\<close>
text\<open>
To express the dependencies between text elements to the formal
@ -663,7 +925,7 @@ text\<open>
\<close>
subsubsection*["sec:monitors"::technical]\<open>ODL Monitors\<close> subsection*["sec:monitors"::technical]\<open>ODL Monitors\<close>
text\<open>
We call a document class with an accept-clause a \<^emph>\<open>monitor\<close>.\<^bindex>\<open>monitor\<close> Syntactically, an
accept-clause\<^index>\<open>accept-clause\<close> contains a regular expression over class identifiers.
@ -715,7 +977,7 @@ text\<open>
sections.\<close>
subsubsection*["sec:class_inv"::technical]\<open>ODL Class Invariants\<close> subsection*["sec:class_inv"::technical]\<open>ODL Class Invariants\<close>
text\<open>
Ontological classes as described so far are too liberal in many situations. For example, one
would like to express that any instance of a \<^boxed_theory_text>\<open>result\<close> class finally has a
@ -763,6 +1025,119 @@ fun check_result_inv oid {is_monitor:bool} ctxt =
\<close>
section*[infrastructure::technical]\<open>Technical Infrastructure\<close>
text\<open>
The list of fully supported (\<^ie>, supporting both interactive ontological modeling and
document generation) ontologies and the list of supported document templates can be
obtained by calling \inlinebash|isabelle mkroot_DOF -h| (see \<^technical>\<open>first_project\<close>).
Note that the postfix \inlinebash|-UNSUPPORTED| denotes experimental ontologies or templates
for which further manual setup steps might be required or that are not fully tested. Also note
that the \<^LaTeX>-class files required by the templates need to be already installed on your
system. This is mostly a problem for publisher specific templates (\<^eg>, Springer's
\<^path>\<open>llncs.cls\<close>), which cannot be re-distributed due to copyright restrictions.
\<close>
subsection\<open>Developing Ontologies and their Representation Mappings\<close>
text\<open>
The document core \<^emph>\<open>may\<close>, but \<^emph>\<open>need\<close> not, use Isabelle definitions or proofs for checking the
formal content---this manual is actually an example of a document not containing any proof.
Consequently, the document editing and checking facility provided by \<^isadof> addresses the needs
of common users for an advanced text-editing environment; neither modeling nor proof knowledge is
inherently required.
We expect authors of ontologies to have experience in the use of \<^isadof>, basic modeling (and,
potentially, some basic SML programming) experience, basic \<^LaTeX> knowledge, and, last but not
least, domain knowledge of the ontology to be modeled. Users with experience in UML-like
meta-modeling will feel familiar with most concepts; however, we expect no need for insight in
the Isabelle proof language, for example, or other more advanced concepts.
Technically, ontologies\<^index>\<open>ontology!directory structure\<close> are stored in a directory
\inlinebash|src/ontologies| and consist of an Isabelle theory file and a \<^LaTeX>-style file:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 ontologies\DTcomment{Ontologies}.
.4 ontologies.thy\DTcomment{Ontology Registration}.
.4 scholarly\_paper\DTcomment{scholarly\_paper}.
.5 scholarly\_paper.thy.
.5 DOF-scholarly\_paper.sty.
.4 technical\_report\DTcomment{technical\_paper}.
.5 technical\_report.thy.
.5 DOF-technical\_report.sty.
.4 CENELEC\_50128\DTcomment{CENELEC\_50128}.
.5 CENELEC\_50128.thy.
.5 DOF-CENELEC\_50128.sty.
.4 \ldots.
}
\end{minipage}
\end{center}
\<close>
text\<open>
Developing a new ontology ``\inlinebash|foo|'' requires, from a technical perspective, the
following steps:
\<^item> create a new sub-directory \inlinebash|foo| in the directory \inlinebash|src/ontologies|
 \<^item> definition of the ontological concepts, using \<^isadof>'s Ontology Definition Language (ODL), in
   a new theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close> (a minimal sketch of such a theory is
   shown after this list).
 \<^item> definition of the document representation for the ontological concepts in a \LaTeX-style
   file \<^path>\<open>src/ontologies/foo/DOF-foo.sty\<close>
 \<^item> registration (as import) of the new ontology in the file
   \<^path>\<open>src/ontologies/ontologies.thy\<close>.
\<^item> activation of the new document setup by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>
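text\<open>A minimal version of the theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close> could, for
example, look as follows (a sketch only: the class, its attribute, and the exact import path
are placeholders and have to be adapted to the repository layout):
@{boxed_theory_text [display]\<open>
theory foo
  imports "../../DOF/Isa_COL"
begin
doc_class foo_requirement = text_element +
   status :: string <= "''draft''"
end
\<close>}\<close>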
subsection\<open>Document Templates\<close>
text\<open>
Document-templates\<^index>\<open>document template\<close> define the overall layout (page size, margins, fonts,
etc.) of the generated documents and are the main technical means for implementing layout
requirements that are, \<^eg>, required by publishers or standardization bodies. Document-templates
are stored in a directory
\<^path>\<open>src/document-templates\<close>:\<^index>\<open>document template!directory structure\<close>
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 document-templates\DTcomment{Document templates}.
.4 root-lncs.tex.
.4 root-scrartcl.tex.
.4 root-scrreprt-modern.tex.
.4 root-scrreprt.tex.
}
\end{minipage}
\end{center}
\<close>
text\<open>
Developing a new document template ``\inlinebash|bar|'' requires the following steps:
\<^item> develop a new \<^LaTeX>-template \inlinebash|src/document-templates/root-bar.tex|
\<^item> activation of the new document template by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>
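text\<open>Apart from the \<^isadof>-specific setup (omitted in this sketch), such a template is
essentially a standard Isabelle document root file, \<^eg>:
\begin{ltx}
\documentclass{article}  % or a publisher-provided class
\usepackage{isabelle,isabellesym}
% ... DOF- and ontology-specific packages are added by the installation setup ...
\begin{document}
\maketitle
\input{session}
\end{document}
\end{ltx}
The concrete package setup is best copied from one of the existing templates, \<^eg>,
\<^path>\<open>root-scrartcl.tex\<close>.\<close>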
text\<open>
As the document generation of \<^isadof> is based
on \<^LaTeX>, the \<^isadof> document templates can (and should) make use of any \<^LaTeX>-classes provided
by publishers or standardization bodies.
\<close>
section*["document-templates"::technical]\<open>Defining Document Templates\<close> section*["document-templates"::technical]\<open>Defining Document Templates\<close>
subsection\<open>The Core Template\<close> subsection\<open>The Core Template\<close>
@ -989,8 +1364,6 @@ text\<open>
(*<*) (*<*)
end end
(*>*) (*>*)


@ -22,7 +22,7 @@ chapter*[isadof_developers::text_section]\<open>Extending \<^isadof>\<close>
text\<open>
In this chapter, we describe the basic implementation aspects of \<^isadof>, which is based on
the following design-decisions:
  \<^item> the entire \<^isadof> is a ``pure add-on,'' \<^ie>, we deliberately refrain from
    modifying Isabelle itself.
  \<^item> we made a small exception to this rule: the \<^isadof> package modifies in its installation
    about 10 lines in the \LaTeX-generator (\path{src/patches/thy_output.ML}).


@ -14,9 +14,10 @@
(*<*)
theory TR_MyCommentedIsabelle
  imports "Isabelle_DOF.technical_report"
begin
define_shortcut* isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
open_monitor*[this::report]
(*>*)
@ -181,7 +182,7 @@ ML\<open>
\<close>
(*>*)
text\<open>\<^vs>\<open>-1.0cm\<close>... which we will describe in more detail later. \<close>
text\<open>In a way, anti-quotations implement a kind of
literate specification style in text, models, code, proofs, etc., which become altogether
@ -483,28 +484,28 @@ text\<open>Note, furthermore, that there is a programming API for the HOL-instan
operators of the HOL logic specific constructors and destructors:\<close>
text*[T2::technical]\<open>
\<^enum> \<^ML>\<open>HOLogic.boolT : typ\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Trueprop : term -> term\<close>, the embedder of bool to prop fundamental for HOL \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_Trueprop : term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Trueprop_conv : conv -> conv\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_setT : typ -> typ\<close>, the ML level type constructor set \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_setT : typ -> typ\<close>, the ML level type destructor for set \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Collect_const : typ -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Collect : string * typ * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_mem : term * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_mem : term -> term * term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_set : typ -> term list -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_intr : Proof.context -> thm -> thm -> thm\<close>, some HOL-level derived-inferences \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elim : Proof.context -> thm -> thm * thm\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elims : Proof.context -> thm -> thm list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj : term\<close>, some ML level logical constructors \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.disj : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.imp : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Not : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_not : term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_conj : term * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_conj : term -> term list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conjuncts : term -> term list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> ...
\<close>
@ -702,7 +703,7 @@ proof - fix a :: nat
subsection*[t233::technical]\<open> Theories and the Signature API\<close>
text\<open>
\<^enum> \<^ML>\<open>Sign.tsig_of : theory -> Type.tsig\<close> extracts the type-signature of a theory
\<^enum> \<^ML>\<open>Sign.syn_of : theory -> Syntax.syntax\<close> extracts the constant-symbol signature
\<^enum> \<^ML>\<open>Sign.of_sort : theory -> typ * sort -> bool\<close> decides that a type belongs to a sort.
\<close>
@ -2306,8 +2307,8 @@ text\<open> This interactive Isabelle Programming Cook-Book represents my curren
\<close>
(*<*)
paragraph\<open>Many thanks to Frederic Tuong, who contributed some examples such as the string
cartouche for Unicode Character Denotations as well as many local hints for improvements.\<close>
section*[bib::bibliography]\<open>Bibliography\<close>


@ -23,12 +23,11 @@ text\<open> Building a fundamental infrastructure for common document elements s
theory Isa_COL theory Isa_COL
imports Isa_DOF imports Isa_DOF
keywords "title*" "subtitle*" "chapter*" keywords "title*" "subtitle*"
"section*" "subsection*" "subsubsection*" "chapter*" "section*"
"paragraph*" "subparagraph*" :: document_body "subsection*" "subsubsection*"
and "figure*" "side_by_side_figure*" :: document_body "paragraph*" "subparagraph*"
and "assert*" :: thy_decl "figure*" "side_by_side_figure*" :: document_body
begin begin
@ -98,7 +97,7 @@ fun transform_cid thy NONE X = X
in if DOF_core.is_subclass_global thy sub_cid_long cid_long in if DOF_core.is_subclass_global thy sub_cid_long cid_long
then (SOME (sub_cid,pos)) then (SOME (sub_cid,pos))
else (* (SOME (sub_cid,pos)) *) else (* (SOME (sub_cid,pos)) *)
(* BUG : check reveals problem of Definition* misuse. *) (* BUG : check reveals problem of Definition* misuse. *)
error("class "^sub_cid_long^ error("class "^sub_cid_long^
" must be sub-class of "^cid_long) " must be sub-class of "^cid_long)
end end
@ -120,30 +119,6 @@ fun enriched_document_cmd_exp ncid (S: (string * string) list) =
end; end;
end (* local *) end (* local *)
fun assertion_cmd'((((((oid,pos),cid_pos),doc_attrs),name_opt:string option),modes : string list),
prop) =
let fun conv_2_holstring thy = (bstring_to_holstring (Proof_Context.init_global thy))
fun conv_attrs thy = (("properties",pos),"[@{termrepr ''"^conv_2_holstring thy prop ^" ''}]")
::doc_attrs
fun conv_attrs' thy = map (fn ((lhs,pos),rhs) => (((lhs,pos),"+="),rhs)) (conv_attrs thy)
fun mks thy = case DOF_core.get_object_global_opt oid thy of
SOME NONE => (error("update of declared but not created doc_item:" ^ oid))
| SOME _ => (update_instance_command (((oid,pos),cid_pos),conv_attrs' thy) thy)
| NONE => (create_and_check_docitem
{is_monitor = false} {is_inline = false}
oid pos cid_pos (conv_attrs thy) thy)
val check = (assert_cmd name_opt modes prop) o Proof_Context.init_global
in
(* Toplevel.keep (check o Toplevel.context_of) *)
Toplevel.theory (fn thy => (check thy; mks thy))
end
val _ =
Outer_Syntax.command @{command_keyword "assert*"}
"evaluate and print term"
(attributes -- opt_evaluator -- opt_modes -- Parse.term >> assertion_cmd');
val _ = val _ =
Outer_Syntax.command ("title*", @{here}) "section heading" Outer_Syntax.command ("title*", @{here}) "section heading"
@ -251,172 +226,20 @@ val _ =
end end
\<close> \<close>
section\<open>Shortcuts, Macros, Environments\<close> (*<*)
text\<open>The features described in this section are actually \<^emph>\<open>not\<close> real ISADOF features, rather a (*
slightly more abstract layer over somewhat buried standard features of the Isabelle document ML\<open>ML_Context.expression\<close>
generator ... (Thanks to Makarius) Conceptually, they are \<^emph>\<open>sub-text-elements\<close>. \<close> fun setup source =
ML_Context.expression (Input.pos_of source)
(ML_Lex.read "Theory.setup (" @ ML_Lex.read_source source @ ML_Lex.read ")")
|> Context.theory_map;
setup\<open>\<close>
text\<open>This module provides mechanisms to define front-end checked:
\<^enum> \<^emph>\<open>shortcuts\<close>, i.e. machine-checked abbreviations without arguments
that were mapped to user-defined LaTeX code (Example: \<^verbatim>\<open>\ie\<close>)
\<^enum> \<^emph>\<open>macros\<close> with one argument that were mapped to user-defined code. Example: \<^verbatim>\<open>\myurl{bla}\<close>.
The argument can be potentially checked and reports can be sent to PIDE;
if no such checking is desired, this can be expressed by setting the
\<^theory_text>\<open>reportNtest\<close>-parameter to \<^theory_text>\<open>K(K())\<close>.
\<^enum> \<^emph>\<open>macros\<close> with two arguments, potentially independently checked. See above.
Example: \<^verbatim>\<open>\myurl[ding]{dong}\<close>,
\<^enum> \<^emph>\<open>boxes\<close> which are more complex sub-text-elements in the line of the \<^verbatim>\<open>verbatim\<close> or
\<^verbatim>\<open>theory_text\<close> environments.
Note that we deliberately refrained from a code-template definition mechanism for simplicity,
so the patterns were just described by strings. No additional ado with quoting/unquoting
mechanisms ...
\<close>
ML\<open>
structure DOF_lib =
struct
fun define_shortcut name latexshcut =
Thy_Output.antiquotation_raw name (Scan.succeed ())
(fn _ => fn () => Latex.string latexshcut)
(* This is a generalization of the Isabelle2020 function "control_antiquotation" from
document_antiquotations.ML. (Thanks Makarius!) *)
fun define_macro name s1 s2 reportNtest =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.cartouche_input)
(fn ctxt =>
fn src => let val () = reportNtest ctxt src
in src |> Latex.enclose_block s1 s2
o Thy_Output.output_document ctxt {markdown = false}
end);
local (* hide away really strange local construction *)
fun enclose_body2 front body1 middle body2 post =
(if front = "" then [] else [Latex.string front]) @ body1 @
(if middle = "" then [] else [Latex.string middle]) @ body2 @
(if post = "" then [] else [Latex.string post]);
in
fun define_macro2 name front middle post reportNtest1 reportNtest2 =
Thy_Output.antiquotation_raw_embedded name (Scan.lift ( Args.cartouche_input
-- Args.cartouche_input))
(fn ctxt =>
fn (src1,src2) => let val () = reportNtest1 ctxt src1
val () = reportNtest2 ctxt src2
val T1 = Thy_Output.output_document ctxt {markdown = false} src1
val T2 = Thy_Output.output_document ctxt {markdown = false} src2
in Latex.block(enclose_body2 front T1 middle T2 post)
end);
end
fun report_text ctxt text =
let val pos = Input.pos_of text in
Context_Position.reports ctxt
[(pos, Markup.language_text (Input.is_delimited text)),
(pos, Markup.raw_text)]
end;
fun report_theory_text ctxt text =
let val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in () end
fun prepare_text ctxt =
Input.source_content #> #1 #> Document_Antiquotation.prepare_lines ctxt;
(* This also produces indent-expansion and changes space to "\_" and the introduction of "\newline",
I believe. Otherwise its in Thy_Output.output_source, the compiler from string to LaTeX.text. *)
fun string_2_text_antiquotation ctxt text =
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt
fun string_2_theory_text_antiquotation ctxt text =
let
val keywords = Thy_Header.get_keywords' ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
end
fun gen_text_antiquotation name reportNcheck compile =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text:Input.source =>
let
val _ = reportNcheck ctxt text;
in
compile ctxt text
end);
fun std_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_text string_2_text_antiquotation
(* should be the same as (2020):
fun text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val _ = report_text ctxt text;
in
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt
end);
*) *)
(*>*)
fun std_theory_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_theory_text string_2_theory_text_antiquotation
(* should be the same as (2020):
fun theory_text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
|> enclose_env ctxt "isarbox"
end);
*)
fun environment_delim name =
("%\n\\begin{" ^ Latex.output_name name ^ "}\n",
"\n\\end{" ^ Latex.output_name name ^ "}");
fun environment_block name = environment_delim name |-> Latex.enclose_body #> Latex.block;
fun enclose_env verbatim ctxt block_env body =
if Config.get ctxt Document_Antiquotation.thy_output_display
then if verbatim
then environment_block block_env [body]
else Latex.environment_block block_env [body]
else Latex.block ([Latex.string ("\\inline"^block_env ^"{")] @ [body] @ [ Latex.string ("}")]);
end
\<close>
section\<open>Tables\<close> section\<open>Tables\<close>
(* TODO ! ! ! *) (* TODO ! ! ! *)
(* dito the future monitor: table - block *) (* dito the future monitor: table - block *)
@ -426,5 +249,4 @@ ML\<open>@{term "side_by_side_figure"};
@{typ "doc_class rexp"}; @{typ "doc_class rexp"};
DOF_core.SPY;\<close> DOF_core.SPY;\<close>
end end


@ -33,19 +33,18 @@ theory Isa_DOF (* Isabelle Document Ontology Framework *)
RegExpInterface (* Interface to functional regular automata for monitoring *) RegExpInterface (* Interface to functional regular automata for monitoring *)
Assert Assert
keywords "+=" ":=" "accepts" "rejects" "invariant" keywords "+=" ":=" "accepts" "rejects" "invariant"
and "open_monitor*" "close_monitor*" "declare_reference*" and "open_monitor*" "close_monitor*"
"update_instance*" "doc_class" ::thy_decl "declare_reference*" "update_instance*"
"doc_class"
"define_shortcut*" "define_macro*" :: thy_decl
and "text*" "text-macro*" :: document_body and "text*" "text-macro*" :: document_body
and "print_doc_classes" "print_doc_items" and "print_doc_classes" "print_doc_items"
"print_doc_class_template" "check_doc_global" :: diag "print_doc_class_template" "check_doc_global" :: diag
(* experimental *)
and "corrollary*" "proposition*" "lemma*" "theorem*" :: thy_decl
(* -- intended replacement of Isar std commands.*)
@ -674,6 +673,21 @@ fun print_doc_classes b ctxt =
writeln "=====================================\n\n\n" writeln "=====================================\n\n\n"
end; end;
fun print_doc_class_tree ctxt P T =
let val {docobj_tab={tab = x, ...},docclass_tab, ...} = get_data ctxt;
val class_tab:(string * docclass_struct)list = (Symtab.dest docclass_tab)
fun is_class_son X (n, dc:docclass_struct) = (X = #inherits_from dc)
fun tree lev ([]:(string * docclass_struct)list) = ""
|tree lev ((n,R)::S) = (if P(lev,n)
then "."^Int.toString lev^" "^(T n)^"{...}.\n"
^ (tree(lev + 1)(filter(is_class_son(SOME([],n)))class_tab))
else "."^Int.toString lev^" ... \n")
^ (tree lev S)
val roots = filter(is_class_son NONE) class_tab
in ".0 .\n" ^ tree 1 roots end
fun check_doc_global (strict_checking : bool) ctxt = fun check_doc_global (strict_checking : bool) ctxt =
let val {docobj_tab={tab = x, ...}, monitor_tab, ...} = get_data ctxt; let val {docobj_tab={tab = x, ...}, monitor_tab, ...} = get_data ctxt;
val S = map_filter (fn (s,NONE) => SOME s | _ => NONE) (Symtab.dest x) val S = map_filter (fn (s,NONE) => SOME s | _ => NONE) (Symtab.dest x)
@ -909,8 +923,8 @@ fun ML_isa_check_docitem thy (term, req_ty, pos) =
| _ => error("can not infer type for: "^ name) | _ => error("can not infer type for: "^ name)
in if cid <> DOF_core.default_cid in if cid <> DOF_core.default_cid
andalso not(DOF_core.is_subclass ctxt cid req_class) andalso not(DOF_core.is_subclass ctxt cid req_class)
then error("reference ontologically inconsistent: "^ then error("reference ontologically inconsistent: "
Position.here pos_decl) ^cid^" vs. "^req_class^ Position.here pos_decl)
else () else ()
end end
else err ("faulty reference to docitem: "^name) pos else err ("faulty reference to docitem: "^name) pos
@ -1473,44 +1487,6 @@ val _ = Thy_Output.set_meta_args_parser
ML \<open>
local (* dull and dangerous copy from Pure.thy given that these functions are not
globally exported. *)
val long_keyword =
Parse_Spec.includes >> K "" ||
Parse_Spec.long_statement_keyword;
val long_statement =
Scan.optional (Parse_Spec.opt_thm_name ":" --| Scan.ahead long_keyword) Binding.empty_atts --
Scan.optional Parse_Spec.includes [] -- Parse_Spec.long_statement
>> (fn ((binding, includes), (elems, concl)) => (true, binding, includes, elems, concl));
val short_statement =
Parse_Spec.statement -- Parse_Spec.if_statement -- Parse.for_fixes
>> (fn ((shows, assumes), fixes) =>
(false, Binding.empty_atts, [], [Element.Fixes fixes, Element.Assumes assumes],
Element.Shows shows));
fun theorem spec schematic descr =
Outer_Syntax.local_theory_to_proof' spec ("state " ^ descr)
((ODL_Command_Parser.attributes -- (long_statement || short_statement))
>> (fn (_ (* skip *) ,(long, binding, includes, elems, concl)) =>
((if schematic then Specification.schematic_theorem_cmd
else Specification.theorem_cmd )
long Thm.theoremK NONE (K I) binding includes elems concl)));
in
(* Half - fake. activates original Isar commands, but skips meta-arguments for the moment. *)
(* tendance deprecated - see new scholarly paper setup. *)
val _ = theorem @{command_keyword "theorem*"} false "theorem";
val _ = theorem @{command_keyword "lemma*"} false "lemma";
val _ = theorem @{command_keyword "corrollary*"} false "corollary";
val _ = theorem @{command_keyword "proposition*"} false "proposition";
end\<close>
section\<open> Syntax for Ontological Antiquotations (the '' View'' Part II) \<close> section\<open> Syntax for Ontological Antiquotations (the '' View'' Part II) \<close>
@ -1533,9 +1509,9 @@ fun check_and_mark ctxt cid_decl (str:{strict_checking: bool}) {inline=inline_re
val markup = docref_markup false name id pos_decl; val markup = docref_markup false name id pos_decl;
val _ = Context_Position.report ctxt pos markup; val _ = Context_Position.report ctxt pos markup;
(* this sends a report for a ref application to the PIDE interface ... *) (* this sends a report for a ref application to the PIDE interface ... *)
val _ = if cid <> DOF_core.default_cid val _ = if not(DOF_core.is_subclass ctxt cid cid_decl)
andalso not(DOF_core.is_subclass ctxt cid cid_decl) then error("reference ontologically inconsistent: "^cid
then error("reference ontologically inconsistent:" ^ Position.here pos_decl) ^" must be subclass of "^cid_decl^ Position.here pos_decl)
else () else ()
in () end in () end
else if DOF_core.is_declared_oid_global name thy else if DOF_core.is_declared_oid_global name thy
@ -1617,7 +1593,6 @@ val _ = Theory.setup
end (* struct *) end (* struct *)
\<close> \<close>
text\<open> @{thm [] refl}\<close>
ML\<open> ML\<open>
structure AttributeAccess = structure AttributeAccess =
@ -1848,7 +1823,201 @@ val _ =
end (* struct *) end (* struct *)
\<close> \<close>
section\<open>Shortcuts, Macros, Environments\<close>
text\<open>The features described in this section are actually \<^emph>\<open>not\<close> real ISADOF features, rather a
slightly more abstract layer over somewhat buried standard features of the Isabelle document
generator ... (Thanks to Makarius) Conceptually, they are \<^emph>\<open>sub-text-elements\<close>. \<close>
text\<open>This module provides mechanisms to define front-end checked:
  \<^enum> \<^emph>\<open>shortcuts\<close>, i.e. machine-checked abbreviations without arguments
    that are mapped to user-defined LaTeX code (Example: \<^verbatim>\<open>\ie\<close>)
  \<^enum> \<^emph>\<open>macros\<close> with one argument that are mapped to user-defined code. Example: \<^verbatim>\<open>\myurl{bla}\<close>.
    The argument can potentially be checked and reports can be sent to PIDE;
if no such checking is desired, this can be expressed by setting the
\<^theory_text>\<open>reportNtest\<close>-parameter to \<^theory_text>\<open>K(K())\<close>.
\<^enum> \<^emph>\<open>macros\<close> with two arguments, potentially independently checked. See above.
Example: \<^verbatim>\<open>\myurl[ding]{dong}\<close>,
\<^enum> \<^emph>\<open>boxes\<close> which are more complex sub-text-elements in the line of the \<^verbatim>\<open>verbatim\<close> or
\<^verbatim>\<open>theory_text\<close> environments.
Note that we deliberately refrained from a code-template definition mechanism for simplicity,
so the patterns were just described by strings. No additional ado with quoting/unquoting
mechanisms ...
\<close>
ML\<open>
structure DOF_lib =
struct
fun define_shortcut name latexshcut =
Thy_Output.antiquotation_raw name (Scan.succeed ())
(fn _ => fn () => Latex.string latexshcut)
(* This is a generalization of the Isabelle2020 function "control_antiquotation" from
document_antiquotations.ML. (Thanks Makarius!) *)
fun define_macro name s1 s2 reportNtest =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.cartouche_input)
(fn ctxt =>
fn src => let val () = reportNtest ctxt src
in src |> Latex.enclose_block s1 s2
o Thy_Output.output_document ctxt {markdown = false}
end);
local (* hide away really strange local construction *)
fun enclose_body2 front body1 middle body2 post =
(if front = "" then [] else [Latex.string front]) @ body1 @
(if middle = "" then [] else [Latex.string middle]) @ body2 @
(if post = "" then [] else [Latex.string post]);
in
fun define_macro2 name front middle post reportNtest1 reportNtest2 =
Thy_Output.antiquotation_raw_embedded name (Scan.lift ( Args.cartouche_input
-- Args.cartouche_input))
(fn ctxt =>
fn (src1,src2) => let val () = reportNtest1 ctxt src1
val () = reportNtest2 ctxt src2
val T1 = Thy_Output.output_document ctxt {markdown = false} src1
val T2 = Thy_Output.output_document ctxt {markdown = false} src2
in Latex.block(enclose_body2 front T1 middle T2 post)
end);
end
fun report_text ctxt text =
let val pos = Input.pos_of text in
Context_Position.reports ctxt
[(pos, Markup.language_text (Input.is_delimited text)),
(pos, Markup.raw_text)]
end;
fun report_theory_text ctxt text =
let val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in () end
fun prepare_text ctxt =
Input.source_content #> #1 #> Document_Antiquotation.prepare_lines ctxt;
(* This also produces indent-expansion and changes space to "\_" and the introduction of "\newline",
I believe. Otherwise its in Thy_Output.output_source, the compiler from string to LaTeX.text. *)
fun string_2_text_antiquotation ctxt text =
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt
fun string_2_theory_text_antiquotation ctxt text =
let
val keywords = Thy_Header.get_keywords' ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
end
fun gen_text_antiquotation name reportNcheck compile =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text:Input.source =>
let
val _ = reportNcheck ctxt text;
in
compile ctxt text
end);
fun std_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_text string_2_text_antiquotation
(* should be the same as (2020):
fun text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val _ = report_text ctxt text;
in
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt
end);
*)
fun std_theory_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_theory_text string_2_theory_text_antiquotation
(* should be the same as (2020):
fun theory_text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
|> enclose_env ctxt "isarbox"
end);
*)
fun environment_delim name =
("%\n\\begin{" ^ Latex.output_name name ^ "}\n",
"\n\\end{" ^ Latex.output_name name ^ "}");
fun environment_block name = environment_delim name |-> Latex.enclose_body #> Latex.block;
fun enclose_env verbatim ctxt block_env body =
if Config.get ctxt Document_Antiquotation.thy_output_display
then if verbatim
then environment_block block_env [body]
else Latex.environment_block block_env [body]
else Latex.block ([Latex.string ("\\inline"^block_env ^"{")] @ [body] @ [ Latex.string ("}")]);
end
\<close>
ML\<open>
local
val parse_literal = Parse.alt_string || Parse.cartouche
val parse_define_shortcut = Parse.binding -- ((\<^keyword>\<open>\<rightleftharpoons>\<close> || \<^keyword>\<open>==\<close>) |-- parse_literal)
val define_shortcuts = fold(uncurry DOF_lib.define_shortcut)
in
val _ = Outer_Syntax.command \<^command_keyword>\<open>define_shortcut*\<close> "define LaTeX shortcut"
(Scan.repeat1 parse_define_shortcut >> (Toplevel.theory o define_shortcuts));
end
\<close>
ML\<open>
val parse_literal = Parse.alt_string || Parse.cartouche
val parse_define_shortcut = Parse.binding
-- ((\<^keyword>\<open>\<rightleftharpoons>\<close> || \<^keyword>\<open>==\<close>) |-- parse_literal)
--|Parse.underscore
-- parse_literal
-- (Scan.option (\<^keyword>\<open>(\<close> |-- Parse.ML_source --|\<^keyword>\<open>)\<close>))
fun define_macro (X,NONE) = (uncurry(uncurry(uncurry DOF_lib.define_macro)))(X,K(K()))
|define_macro (X,SOME(src:Input.source)) =
let val check_code = K(K()) (* hack *)
val _ = warning "Checker code support Not Yet Implemented - use ML"
in (uncurry(uncurry(uncurry DOF_lib.define_macro)))(X,check_code)
end;
val _ = Outer_Syntax.command \<^command_keyword>\<open>define_macro*\<close> "define LaTeX macro"
(Scan.repeat1 parse_define_shortcut >> (Toplevel.theory o (fold define_macro)));
\<close>
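(* Hypothetical example uses of the two commands defined above:

   define_shortcut* eg    \<rightleftharpoons> \<open>\eg\<close>
   define_macro*    myurl \<rightleftharpoons> \<open>\url{\<close> _ \<open>}\<close>
*)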
(* (*
ML\<open> ML\<open>
Pretty.text; Pretty.text;


@ -10,7 +10,7 @@ begin
text\<open>We re-use the class @\<open>typ math_content\<close>, which also provides a framework for
     semi-formal terminology, which we re-use in this definition.\<close>
doc_class concept_definition = "definition" + doc_class concept_definition = math_content +
status :: status <= "semiformal" status :: status <= "semiformal"
mcc :: math_content_class <= "terminology" mcc :: math_content_class <= "terminology"
tag :: string tag :: string
@ -20,6 +20,9 @@ text\<open>The \<^verbatim>\<open>short_tag\<close>, if set, is used in the pres
type_synonym concept = concept_definition type_synonym concept = concept_definition
declare[[ Definition_default_class="concept_definition"]]
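(* With Definition_default_class set to "concept_definition", the Definition* items below no
   longer need an explicit ::concept annotation; the default class is supplied automatically. *)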
(*>>*) (*>>*)
section \<open>Terminology\<close> section \<open>Terminology\<close>
@ -27,86 +30,86 @@ section \<open>Terminology\<close>
subsection \<open>Terms and definitions common in the CC\<close> subsection \<open>Terms and definitions common in the CC\<close>
Definition* [aas_def::concept, tag= "''adverse actions''"] Definition* [aas_def, tag= "''adverse actions''"]
\<open>actions performed by a threat agent on an asset\<close> \<open>actions performed by a threat agent on an asset\<close>
declare_reference*[toe_def::concept] declare_reference*[toe_def]
Definition* [assts_def::concept, tag="''assets''"] Definition* [assts_def, tag="''assets''"]
\<open>entities that the owner of the @{docitem toe_def} presumably places value upon \<close> \<open>entities that the owner of the @{docitem toe_def} presumably places value upon \<close>
Definition* [asgn_def::concept, tag="''assignment''"] Definition* [asgn_def, tag="''assignment''"]
\<open>the specification of an identified parameter in a component (of the CC) or requirement.\<close> \<open>the specification of an identified parameter in a component (of the CC) or requirement.\<close>
declare_reference*[sfrs_def::concept] declare_reference*[sfrs_def]
Definition* [assrc_def::concept, tag="''assurance''"] Definition* [assrc_def, tag="''assurance''"]
\<open>grounds for confidence that a @{docitem toe_def} meets the @{docitem sfrs_def}\<close> \<open>grounds for confidence that a @{docitem toe_def} meets the @{docitem sfrs_def}\<close>
Definition* [attptl_def::concept, tag="''attack potential''"] Definition* [attptl_def, tag="''attack potential''"]
\<open>measure of the effort to be expended in attacking a TOE, expressed in terms of \<open>measure of the effort to be expended in attacking a TOE, expressed in terms of
an attacker's expertise, resources and motivation\<close> an attacker's expertise, resources and motivation\<close>
Definition* [argmt_def::concept, tag= "''augmentation''"] Definition* [argmt_def, tag= "''augmentation''"]
\<open>addition of one or more requirement(s) to a package\<close> \<open>addition of one or more requirement(s) to a package\<close>
Definition* [authdata_def::concept, tag="''authentication data''"] Definition* [authdata_def, tag="''authentication data''"]
\<open>information used to verify the claimed identity of a user\<close> \<open>information used to verify the claimed identity of a user\<close>
Definition* [authusr_def::concept, tag = "''authorised user''"] Definition* [authusr_def, tag = "''authorised user''"]
\<open>@{docitem toe_def} user who may, in accordance with the @{docitem sfrs_def}, perform an operation\<close> \<open>@{docitem toe_def} user who may, in accordance with the @{docitem sfrs_def}, perform an operation\<close>
Definition* [bpp_def::concept, tag="''Base Protection Profile''"] Definition* [bpp_def, tag="''Base Protection Profile''"]
\<open>Protection Profile used as a basis to build a Protection Profile Configuration\<close> \<open>Protection Profile used as a basis to build a Protection Profile Configuration\<close>
Definition* [cls_def::concept,tag="''class''"] Definition* [cls_def,tag="''class''"]
\<open>set of CC families that share a common focus\<close> \<open>set of CC families that share a common focus\<close>
Definition* [cohrnt_def::concept,tag="''coherent''"] Definition* [cohrnt_def,tag="''coherent''"]
\<open>logically ordered and having discernible meaning For documentation, this addresses \<open>logically ordered and having discernible meaning For documentation, this addresses
both the actual text and the structure of the document, in terms of whether it is both the actual text and the structure of the document, in terms of whether it is
understandable by its target audience.\<close> understandable by its target audience.\<close>
Definition* [cmplt_def::concept, tag="''complete''"] Definition* [cmplt_def, tag="''complete''"]
\<open>property where all necessary parts of an entity have been provided \<open>property where all necessary parts of an entity have been provided
In terms of documentation, this means that all relevant information is In terms of documentation, this means that all relevant information is
covered in the documentation, at such a level of detail that no further covered in the documentation, at such a level of detail that no further
explanation is required at that level of abstraction.\<close> explanation is required at that level of abstraction.\<close>
Definition* [compnt_def::concept, tag="''component''"] Definition* [compnt_def, tag="''component''"]
\<open>smallest selectable set of elements on which requirements may be based\<close> \<open>smallest selectable set of elements on which requirements may be based\<close>
Definition*[cap_def::concept, tag="''composed assurance package''"] Definition*[cap_def, tag="''composed assurance package''"]
\<open>assurance package consisting of requirements drawn from CC Part 3 \<open>assurance package consisting of requirements drawn from CC Part 3
(predominately from the ACO class), representing a point on the CC predefined (predominately from the ACO class), representing a point on the CC predefined
composition assurance scale\<close> composition assurance scale\<close>
Definition* [cfrm_def::concept,tag="''confirm''"] Definition* [cfrm_def,tag="''confirm''"]
\<open>declare that something has been reviewed in detail with an independent determination \<open>declare that something has been reviewed in detail with an independent determination
of sufficiency of sufficiency
The level of rigour required depends on the nature of the subject matter. This The level of rigour required depends on the nature of the subject matter. This
term is only applied to evaluator actions.\<close> term is only applied to evaluator actions.\<close>
Definition* [cnnctvty_def::concept, tag="''connectivity''"] Definition* [cnnctvty_def, tag="''connectivity''"]
\<open>property of the @{docitem toe_def} allowing interaction with IT entities external to the \<open>property of the @{docitem toe_def} allowing interaction with IT entities external to the
@{docitem toe_def} @{docitem toe_def}
This includes exchange of data by wire or by wireless means, over any This includes exchange of data by wire or by wireless means, over any
distance in any environment or configuration.\<close> distance in any environment or configuration.\<close>
Definition* [cnstnt_def::concept, tag="''consistent''"] Definition* [cnstnt_def, tag="''consistent''"]
\<open>relationship between two or more entities such that there are no apparent \<open>relationship between two or more entities such that there are no apparent
contradictions between these entities\<close> contradictions between these entities\<close>
Definition* [cnt_vrb_def::concept, tag="''counter, verb''"] Definition* [cnt_vrb_def, tag="''counter, verb''"]
\<open>meet an attack where the impact of a particular threat is mitigated \<open>meet an attack where the impact of a particular threat is mitigated
but not necessarily eradicated\<close> but not necessarily eradicated\<close>
declare_reference*[st_def::concept] declare_reference*[st_def]
declare_reference*[pp_def::concept] declare_reference*[pp_def]
Definition* [dmnst_conf_def::concept, tag="''demonstrable conformance''"] Definition* [dmnst_conf_def, tag="''demonstrable conformance''"]
\<open>relation between an @{docitem st_def} and a @{docitem pp_def}, where the @{docitem st_def} \<open>relation between an @{docitem st_def} and a @{docitem pp_def}, where the @{docitem st_def}
provides a solution which solves the generic security problem in the PP provides a solution which solves the generic security problem in the PP
@ -116,19 +119,19 @@ also suitable for a @{docitem toe_def} type where several similar @{docitem pp_d
allowing the ST author to claim conformance to these @{docitem pp_def}s simultaneously, allowing the ST author to claim conformance to these @{docitem pp_def}s simultaneously,
thereby saving work.\<close> thereby saving work.\<close>
Definition* [dmstrt_def::concept, tag="''demonstrate''"] Definition* [dmstrt_def, tag="''demonstrate''"]
\<open>provide a conclusion gained by an analysis which is less rigorous than a “proof”\<close> \<open>provide a conclusion gained by an analysis which is less rigorous than a “proof”\<close>
Definition* [dpndcy::concept, tag="''dependency''"] Definition* [dpndcy, tag="''dependency''"]
\<open>relationship between components such that if a requirement based on the depending \<open>relationship between components such that if a requirement based on the depending
component is included in a @{docitem pp_def}, ST or package, a requirement based on component is included in a @{docitem pp_def}, ST or package, a requirement based on
the component that is depended upon must normally also be included in the @{docitem pp_def}, the component that is depended upon must normally also be included in the @{docitem pp_def},
@{docitem st_def} or package\<close> @{docitem st_def} or package\<close>
Definition* [dscrb_def::concept, tag="''describe''"] Definition* [dscrb_def, tag="''describe''"]
\<open>provide specific details of an entity\<close> \<open>provide specific details of an entity\<close>
Definition* [dtrmn_def::concept, tag="''determine''"] Definition* [dtrmn_def, tag="''determine''"]
\<open>affirm a particular conclusion based on independent analysis with the objective \<open>affirm a particular conclusion based on independent analysis with the objective
of reaching a particular conclusion of reaching a particular conclusion
@ -137,35 +140,35 @@ Definition* [dtrmn_def::concept, tag="''determine''"]
terms “confirm” or “verify” which imply that an analysis has already been terms “confirm” or “verify” which imply that an analysis has already been
performed which needs to be reviewed\<close> performed which needs to be reviewed\<close>
Definition* [devenv_def::concept, tag="''development environment''"] Definition* [devenv_def, tag="''development environment''"]
\<open>environment in which the @{docitem toe_def} is developed\<close> \<open>environment in which the @{docitem toe_def} is developed\<close>
Definition* [elmnt_def::concept, tag="''element''"] Definition* [elmnt_def, tag="''element''"]
\<open>indivisible statement of a security need\<close> \<open>indivisible statement of a security need\<close>
Definition* [ensr_def::concept, tag="''ensure''"] Definition* [ensr_def, tag="''ensure''"]
\<open>guarantee a strong causal relationship between an action and its consequences \<open>guarantee a strong causal relationship between an action and its consequences
When this term is preceded by the word “help” it indicates that the When this term is preceded by the word “help” it indicates that the
consequence is not fully certain, on the basis of that action alone.\<close> consequence is not fully certain, on the basis of that action alone.\<close>
Definition* [eval_def::concept, tag="''evaluation''"] Definition* [eval_def, tag="''evaluation''"]
\<open>assessment of a @{docitem pp_def}, an @{docitem st_def} or a @{docitem toe_def}, \<open>assessment of a @{docitem pp_def}, an @{docitem st_def} or a @{docitem toe_def},
against defined criteria.\<close> against defined criteria.\<close>
Definition* [eal_def::concept, tag= "''evaluation assurance level''"] Definition* [eal_def, tag= "''evaluation assurance level''"]
\<open>set of assurance requirements drawn from CC Part 3, representing a point on the \<open>set of assurance requirements drawn from CC Part 3, representing a point on the
CC predefined assurance scale, that form an assurance package\<close> CC predefined assurance scale, that form an assurance package\<close>
Definition* [eval_auth_def::concept, tag="''evaluation authority''"] Definition* [eval_auth_def, tag="''evaluation authority''"]
\<open>body that sets the standards and monitors the quality of evaluations conducted by bodies within a specific community and \<open>body that sets the standards and monitors the quality of evaluations conducted by bodies within a specific community and
implements the CC for that community by means of an evaluation scheme\<close> implements the CC for that community by means of an evaluation scheme\<close>
Definition* [eval_schm_def::concept, tag="''evaluation scheme''"] Definition* [eval_schm_def, tag="''evaluation scheme''"]
\<open>administrative and regulatory framework under which the CC is applied by an \<open>administrative and regulatory framework under which the CC is applied by an
evaluation authority within a specific community\<close> evaluation authority within a specific community\<close>
Definition* [exst_def::concept, tag="''exhaustive''"] Definition* [exst_def, tag="''exhaustive''"]
\<open>characteristic of a methodical approach taken to perform an \<open>characteristic of a methodical approach taken to perform an
analysis or activity according to an unambiguous plan analysis or activity according to an unambiguous plan
This term is used in the CC with respect to conducting an analysis or other This term is used in the CC with respect to conducting an analysis or other
@ -175,31 +178,31 @@ analysis or activity according to an unambiguous plan, but that the plan that
was followed is sufficient to ensure that all possible avenues have been was followed is sufficient to ensure that all possible avenues have been
exercised.\<close> exercised.\<close>
Definition* [expln_def::concept, tag="''explain''"] Definition* [expln_def, tag="''explain''"]
\<open> give argument accounting for the reason for taking a course of action \<open> give argument accounting for the reason for taking a course of action
This term differs from both “describe” and “demonstrate”. It is intended to This term differs from both “describe” and “demonstrate”. It is intended to
answer the question “Why?” without actually attempting to argue that the answer the question “Why?” without actually attempting to argue that the
course of action that was taken was necessarily optimal.\<close> course of action that was taken was necessarily optimal.\<close>
Definition* [extn_def::concept, tag= "''extension''"] Definition* [extn_def, tag= "''extension''"]
\<open>addition to an ST or PP of functional requirements not contained in CC \<open>addition to an ST or PP of functional requirements not contained in CC
Part 2 and/or assurance requirements not contained in CC Part 3\<close> Part 2 and/or assurance requirements not contained in CC Part 3\<close>
Definition* [extnl_ent_def::concept, tag="''external entity''"] Definition* [extnl_ent_def, tag="''external entity''"]
\<open>human or IT entity possibly interacting with the TOE from outside of the TOE boundary\<close> \<open>human or IT entity possibly interacting with the TOE from outside of the TOE boundary\<close>
Definition* [fmly_def::concept, tag="''family''"] Definition* [fmly_def, tag="''family''"]
\<open>set of components that share a similar goal but differ in emphasis or rigour\<close> \<open>set of components that share a similar goal but differ in emphasis or rigour\<close>
Definition* [fml_def::concept, tag="''formal''"] Definition* [fml_def, tag="''formal''"]
\<open>expressed in a restricted syntax language with defined semantics \<open>expressed in a restricted syntax language with defined semantics
based on well-established mathematical concepts \<close> based on well-established mathematical concepts \<close>
Definition* [gudn_doc_def::concept, tag="''guidance documentation''"] Definition* [gudn_doc_def, tag="''guidance documentation''"]
\<open>documentation that describes the delivery, preparation, operation, \<open>documentation that describes the delivery, preparation, operation,
management and/or use of the TOE\<close> management and/or use of the TOE\<close>
Definition* [ident_def::concept, tag="''identity''"] Definition* [ident_def, tag="''identity''"]
\<open>representation uniquely identifying entities (e.g. a user, a process or a disk) \<open>representation uniquely identifying entities (e.g. a user, a process or a disk)
within the context of the TOE within the context of the TOE
@ -207,110 +210,110 @@ Definition* [ident_def::concept, tag="''identity''"]
representation can be the full or abbreviated name or a (still unique) representation can be the full or abbreviated name or a (still unique)
pseudonym.\<close> pseudonym.\<close>
Definition* [infml_def::concept, tag="''informal''"] Definition* [infml_def, tag="''informal''"]
\<open>expressed in natural language\<close> \<open>expressed in natural language\<close>
Definition* [intr_tsf_trans_def::concept, tag ="''inter TSF transfers''"] Definition* [intr_tsf_trans_def, tag ="''inter TSF transfers''"]
\<open>communicating data between the TOE and the security functionality of \<open>communicating data between the TOE and the security functionality of
other trusted IT products\<close> other trusted IT products\<close>
Definition* [intl_com_chan_def::concept, tag ="''internal communication channel''"] Definition* [intl_com_chan_def, tag ="''internal communication channel''"]
\<open>communication channel between separated parts of the TOE\<close> \<open>communication channel between separated parts of the TOE\<close>
Definition* [int_toe_trans::concept, tag="''internal TOE transfer''"] Definition* [int_toe_trans, tag="''internal TOE transfer''"]
\<open>communicating data between separated parts of the TOE\<close> \<open>communicating data between separated parts of the TOE\<close>
Definition* [inter_consist_def::concept, tag="''internally consistent''"] Definition* [inter_consist_def, tag="''internally consistent''"]
\<open>no apparent contradictions exist between any aspects of an entity \<open>no apparent contradictions exist between any aspects of an entity
In terms of documentation, this means that there can be no statements within In terms of documentation, this means that there can be no statements within
the documentation that can be taken to contradict each other.\<close> the documentation that can be taken to contradict each other.\<close>
Definition* [iter_def::concept, tag="''iteration''"] Definition* [iter_def, tag="''iteration''"]
\<open>use of the same component to express two or more distinct requirements\<close> \<open>use of the same component to express two or more distinct requirements\<close>
Definition* [jstfct_def::concept, tag="''justification''"] Definition* [jstfct_def, tag="''justification''"]
\<open>analysis leading to a conclusion “Justification” is more rigorous than a demonstration. \<open>analysis leading to a conclusion “Justification” is more rigorous than a demonstration.
This term requires significant rigour in terms of very carefully and thoroughly explaining every This term requires significant rigour in terms of very carefully and thoroughly explaining every
step of a logical argument.\<close> step of a logical argument.\<close>
Definition* [objct_def::concept, tag="''object''"] Definition* [objct_def, tag="''object''"]
\<open>passive entity in the TOE, that contains or receives information, \<open>passive entity in the TOE, that contains or receives information,
and upon which subjects perform operations\<close> and upon which subjects perform operations\<close>
Definition* [op_cc_cmpnt_def::concept, tag ="''operation (on a component of the CC)''"] Definition* [op_cc_cmpnt_def, tag ="''operation (on a component of the CC)''"]
\<open>modification or repetition of a component \<open>modification or repetition of a component
Allowed operations on components are assignment, iteration, refinement and Allowed operations on components are assignment, iteration, refinement and
selection.\<close> selection.\<close>
Definition* [op_obj_def::concept, tag= "''operation (on an object)''"] Definition* [op_obj_def, tag= "''operation (on an object)''"]
\<open>specific type of action performed by a subject on an object\<close> \<open>specific type of action performed by a subject on an object\<close>
Definition* [op_env_def::concept, tag= "''operational environment''"] Definition* [op_env_def, tag= "''operational environment''"]
\<open>environment in which the TOE is operated\<close> \<open>environment in which the TOE is operated\<close>
Definition* [org_sec_po_def::concept, tag="''organisational security policy''"] Definition* [org_sec_po_def, tag="''organisational security policy''"]
\<open>set of security rules, procedures, or guidelines for an organisation \<open>set of security rules, procedures, or guidelines for an organisation
A policy may pertain to a specific operational environment.\<close> A policy may pertain to a specific operational environment.\<close>
Definition* [pckg_def::concept, tag="''package''"] Definition* [pckg_def, tag="''package''"]
\<open>named set of either security functional or security assurance requirements \<open>named set of either security functional or security assurance requirements
An example of a package is “EAL 3”.\<close> An example of a package is “EAL 3”.\<close>
Definition* [pp_config_def::concept, tag="''Protection Profile Configuration''"] Definition* [pp_config_def, tag="''Protection Profile Configuration''"]
\<open>Protection Profile composed of Base Protection Profiles and Protection Profile Module\<close> \<open>Protection Profile composed of Base Protection Profiles and Protection Profile Module\<close>
Definition* [pp_eval_def::concept, tag="''Protection Profile evaluation''"] Definition* [pp_eval_def, tag="''Protection Profile evaluation''"]
\<open> assessment of a PP against defined criteria \<close> \<open> assessment of a PP against defined criteria \<close>
Definition* [pp_def::concept, tag="''Protection Profile''"] Definition* [pp_def, tag="''Protection Profile''"]
\<open>implementation-independent statement of security needs for a TOE type\<close> \<open>implementation-independent statement of security needs for a TOE type\<close>
Definition* [ppm_def::concept, tag="''Protection Profile Module''"] Definition* [ppm_def, tag="''Protection Profile Module''"]
\<open>implementation-independent statement of security needs for a TOE type \<open>implementation-independent statement of security needs for a TOE type
complementary to one or more Base Protection Profiles\<close> complementary to one or more Base Protection Profiles\<close>
declare_reference*[tsf_def::concept] declare_reference*[tsf_def]
Definition* [prv_def::concept, tag="''prove''"] Definition* [prv_def, tag="''prove''"]
\<open>show correspondence by formal analysis in its mathematical sense \<open>show correspondence by formal analysis in its mathematical sense
It is completely rigorous in all ways. Typically, “prove” is used when there is It is completely rigorous in all ways. Typically, “prove” is used when there is
a desire to show correspondence between two @{docitem tsf_def} representations at a high a desire to show correspondence between two @{docitem tsf_def} representations at a high
level of rigour.\<close> level of rigour.\<close>
Definition* [ref_def::concept, tag="''refinement''"] Definition* [ref_def, tag="''refinement''"]
\<open>addition of details to a component\<close> \<open>addition of details to a component\<close>
Definition* [role_def::concept, tag="''role''"] Definition* [role_def, tag="''role''"]
\<open>predefined set of rules establishing the allowed interactions between \<open>predefined set of rules establishing the allowed interactions between
a user and the @{docitem toe_def}\<close> a user and the @{docitem toe_def}\<close>
declare_reference*[sfp_def::concept] declare_reference*[sfp_def]
Definition* [scrt_def::concept, tag="''secret''"] Definition* [scrt_def, tag="''secret''"]
\<open>information that must be known only to authorised users and/or the \<open>information that must be known only to authorised users and/or the
@{docitem tsf_def} in order to enforce a specific @{docitem sfp_def}\<close> @{docitem tsf_def} in order to enforce a specific @{docitem sfp_def}\<close>
declare_reference*[sfr_def::concept] declare_reference*[sfr_def]
Definition* [sec_st_def::concept, tag="''secure state''"] Definition* [sec_st_def, tag="''secure state''"]
\<open>state in which the @{docitem tsf_def} data are consistent and the @{docitem tsf_def} \<open>state in which the @{docitem tsf_def} data are consistent and the @{docitem tsf_def}
continues correct enforcement of the @{docitem sfr_def}s\<close> continues correct enforcement of the @{docitem sfr_def}s\<close>
Definition* [sec_att_def::concept, tag="''security attribute''"] Definition* [sec_att_def, tag="''security attribute''"]
\<open>property of subjects, users (including external IT products), objects, \<open>property of subjects, users (including external IT products), objects,
information, sessions and/or resources that is used in defining the @{docitem sfr_def}s information, sessions and/or resources that is used in defining the @{docitem sfr_def}s
and whose values are used in enforcing the @{docitem sfr_def}s\<close> and whose values are used in enforcing the @{docitem sfr_def}s\<close>
Definition* [sec_def::concept, tag="''security''"] Definition* [sec_def, tag="''security''"]
\<open>function policy set of rules describing specific security behaviour enforced \<open>function policy set of rules describing specific security behaviour enforced
by the @{docitem tsf_def} and expressible as a set of @{docitem sfr_def}s\<close> by the @{docitem tsf_def} and expressible as a set of @{docitem sfr_def}s\<close>
Definition* [sec_obj_def::concept, tag="''security objective''"] Definition* [sec_obj_def, tag="''security objective''"]
\<open>statement of an intent to counter identified threats and/or satisfy identified \<open>statement of an intent to counter identified threats and/or satisfy identified
organisation security policies and/or assumptions\<close> organisation security policies and/or assumptions\<close>
Definition* [sec_prob_def::concept, tag ="''security problem''"] Definition* [sec_prob_def, tag ="''security problem''"]
\<open>statement which in a formal manner defines the nature and scope of the security that \<open>statement which in a formal manner defines the nature and scope of the security that
the TOE is intended to address This statement consists of a combination of: the TOE is intended to address This statement consists of a combination of:
\begin{itemize} \begin{itemize}
@ -319,23 +322,23 @@ the TOE is intended to address This statement consists of a combination of:
 \item the assumptions that are upheld for the operational environment of the TOE.  \item the assumptions that are upheld for the operational environment of the TOE.
\end{itemize}\<close> \end{itemize}\<close>
Definition* [sr_def::concept, tag="''security requirement''", short_tag="Some(''SR'')"] Definition* [sr_def, tag="''security requirement''", short_tag="Some(''SR'')"]
\<open>requirement, stated in a standardised language, which is meant to contribute \<open>requirement, stated in a standardised language, which is meant to contribute
to achieving the security objectives for a TOE\<close> to achieving the security objectives for a TOE\<close>
text \<open>@{docitem toe_def}\<close> text \<open>@{docitem toe_def}\<close>
Definition* [st::concept, tag="''Security Target''", short_tag="Some(''ST'')"] Definition* [st, tag="''Security Target''", short_tag="Some(''ST'')"]
\<open>implementation-dependent statement of security needs for a specific identified @{docitem toe_def}\<close> \<open>implementation-dependent statement of security needs for a specific identified @{docitem toe_def}\<close>
Definition* [slct_def::concept, tag="''selection''"] Definition* [slct_def, tag="''selection''"]
\<open>specification of one or more items from a list in a component\<close> \<open>specification of one or more items from a list in a component\<close>
Definition* [smfrml_def::concept, tag="''semiformal''"] Definition* [smfrml_def, tag="''semiformal''"]
\<open>expressed in a restricted syntax language with defined semantics\<close> \<open>expressed in a restricted syntax language with defined semantics\<close>
Definition* [spcfy_def::concept, tag= "''specify''"] Definition* [spcfy_def, tag= "''specify''"]
\<open>provide specific details about an entity in a rigorous and precise manner\<close> \<open>provide specific details about an entity in a rigorous and precise manner\<close>
Definition* [strct_conf_def::concept, tag="''strict conformance''"] Definition* [strct_conf_def, tag="''strict conformance''"]
\<open>hierarchical relationship between a PP and an ST where all the requirements in the \<open>hierarchical relationship between a PP and an ST where all the requirements in the
PP also exist in the ST PP also exist in the ST
@ -344,36 +347,36 @@ Definition* [strct_conf_def::concept, tag="''strict conformance''"]
be used for stringent requirements that are to be adhered to in a single be used for stringent requirements that are to be adhered to in a single
manner. \<close> manner. \<close>
Definition* [st_eval_def::concept, tag="''ST evaluation''"] Definition* [st_eval_def, tag="''ST evaluation''"]
\<open>assessment of an ST against defined criteria\<close> \<open>assessment of an ST against defined criteria\<close>
Definition* [subj_def::concept, tag="''subject''"] Definition* [subj_def, tag="''subject''"]
\<open>active entity in the TOE that performs operations on objects\<close> \<open>active entity in the TOE that performs operations on objects\<close>
Definition* [toe::concept, tag= "''target of evaluation''"] Definition* [toe, tag= "''target of evaluation''"]
\<open>set of software, firmware and/or hardware possibly accompanied by guidance\<close> \<open>set of software, firmware and/or hardware possibly accompanied by guidance\<close>
Definition* [thrt_agnt_def::concept, tag="''threat agent''"] Definition* [thrt_agnt_def, tag="''threat agent''"]
\<open>entity that can adversely act on assets\<close> \<open>entity that can adversely act on assets\<close>
Definition* [toe_eval_def::concept, tag="''TOE evaluation''"] Definition* [toe_eval_def, tag="''TOE evaluation''"]
\<open>assessment of a TOE against defined criteria\<close> \<open>assessment of a TOE against defined criteria\<close>
Definition* [toe_res_def::concept, tag="''TOE resource''"] Definition* [toe_res_def, tag="''TOE resource''"]
\<open>anything useable or consumable in the TOE\<close> \<open>anything useable or consumable in the TOE\<close>
Definition* [toe_sf_def::concept, tag="''TOE security functionality''", short_tag= "Some(''TSF'')"] Definition* [toe_sf_def, tag="''TOE security functionality''", short_tag= "Some(''TSF'')"]
\<open>combined functionality of all hardware, software, and firmware of a TOE that must be relied upon \<open>combined functionality of all hardware, software, and firmware of a TOE that must be relied upon
for the correct enforcement of the @{docitem sfr_def}s\<close> for the correct enforcement of the @{docitem sfr_def}s\<close>
Definition* [tr_vrb_def::concept, tag="''trace, verb''"] Definition* [tr_vrb_def, tag="''trace, verb''"]
\<open>perform an informal correspondence analysis between two entities with only a \<open>perform an informal correspondence analysis between two entities with only a
minimal level of rigour\<close> minimal level of rigour\<close>
Definition* [trnsfs_out_toe_def::concept, tag="''transfers outside of the TOE''"] Definition* [trnsfs_out_toe_def, tag="''transfers outside of the TOE''"]
\<open>TSF mediated communication of data to entities not under the control of the TSF\<close> \<open>TSF mediated communication of data to entities not under the control of the TSF\<close>
Definition* [transl_def::concept, tag= "''translation''"] Definition* [transl_def, tag= "''translation''"]
\<open> describes the process of describing security requirements in a \<open> describes the process of describing security requirements in a
standardised language. standardised language.
@ -381,45 +384,45 @@ use of the term translation in this context is not literal and does not imply
that every SFR expressed in standardised language can also be translated that every SFR expressed in standardised language can also be translated
back to the security objectives.\<close> back to the security objectives.\<close>
Definition* [trst_chan_def::concept, tag="''trusted channel''"] Definition* [trst_chan_def, tag="''trusted channel''"]
\<open>a means by which a TSF and another trusted IT product \<open>a means by which a TSF and another trusted IT product
can communicate with necessary confidence\<close> can communicate with necessary confidence\<close>
Definition* [trst_it_prod_def::concept, tag="''trusted IT product''"] Definition* [trst_it_prod_def, tag="''trusted IT product''"]
\<open>IT product, other than the TOE, which has its security functional requirements administratively coordinated with the TOE \<open>IT product, other than the TOE, which has its security functional requirements administratively coordinated with the TOE
and which is assumed to enforce its security functional requirements correctly and which is assumed to enforce its security functional requirements correctly
An example of a trusted IT product would be one that has been separately An example of a trusted IT product would be one that has been separately
evaluated.\<close> evaluated.\<close>
Definition* [trst_path_def::concept, tag="''trusted path''"] Definition* [trst_path_def, tag="''trusted path''"]
\<open>means by which a user and a TSF can communicate with the necessary confidence\<close> \<open>means by which a user and a TSF can communicate with the necessary confidence\<close>
Definition* [tsf_data_def::concept, tag="''TSF data''"] Definition* [tsf_data_def, tag="''TSF data''"]
\<open>data for the operation of the TOE upon which the enforcement of the SFR relies\<close> \<open>data for the operation of the TOE upon which the enforcement of the SFR relies\<close>
Definition* [tsf_intrfc_def::concept, tag="''TSF interface''"] Definition* [tsf_intrfc_def, tag="''TSF interface''"]
\<open>means by which external entities (or subjects in the TOE but outside of the TSF) \<open>means by which external entities (or subjects in the TOE but outside of the TSF)
supply data to the TSF, receive data from the TSF and invoke services from the TSF\<close> supply data to the TSF, receive data from the TSF and invoke services from the TSF\<close>
Definition* [usr_def::concept, tag="''user''"] \<open>see external entity\<close> Definition* [usr_def, tag="''user''"] \<open>see external entity\<close>
Definition* [usr_datat_def::concept, tag="''user data''"] Definition* [usr_datat_def, tag="''user data''"]
\<open>data for the user, that does not affect the operation of the TSF\<close> \<open>data for the user, that does not affect the operation of the TSF\<close>
Definition* [vrfy_def::concept, tag="''verify''"] Definition* [vrfy_def, tag="''verify''"]
\<open>rigorously review in detail with an independent determination of \<open>rigorously review in detail with an independent determination of
sufficiency sufficiency
Also see “confirm”. This term has more rigorous connotations. The term Also see “confirm”. This term has more rigorous connotations. The term
“verify” is used in the context of evaluator actions where an independent “verify” is used in the context of evaluator actions where an independent
effort is required of the evaluator.\<close> effort is required of the evaluator.\<close>
Definition* [dev_def::concept, tag="''Developer''"] Definition* [dev_def, tag="''Developer''"]
\<open>who respond to actual or perceived consumer security requirements in \<open>who respond to actual or perceived consumer security requirements in
constructing a @{docitem toe_def}, reference this CC_Part_3 constructing a @{docitem toe_def}, reference this CC_Part_3
when interpreting statements of assurance requirements and determining when interpreting statements of assurance requirements and determining
assurance approaches of @{docitem toe}s.\<close> assurance approaches of @{docitem toe}s.\<close>
Definition*[evalu_def::concept, tag="'' Evaluator''"] Definition*[evalu_def, tag="'' Evaluator''"]
\<open>who use the assurance requirements defined in CC_Part_3 \<open>who use the assurance requirements defined in CC_Part_3
as mandatory statement of evaluation criteria when determining the assurance as mandatory statement of evaluation criteria when determining the assurance
of @{docitem toe_def}s and when evaluating @{docitem pp_def}s and @{docitem st_def}s.\<close> of @{docitem toe_def}s and when evaluating @{docitem pp_def}s and @{docitem st_def}s.\<close>
View File
@ -28,8 +28,20 @@ theory CENELEC_50128
imports "../technical_report/technical_report" imports "../technical_report/technical_report"
begin begin
(* This is a hack and should go into its own ontology, providing things like:
   - Assumption*
   - Hypothesis*
   - Definition* (a redefinition of what happens in tech-report, targeted at the semi-formal
     “Definitions of terminology” \<dots>)
   - Objective*
   - Claim*
   - Requirement*
*)
text\<open>We re-use the class @\<open>typ math_content\<close>, which provides also a framework for text\<open>We re-use the class @\<open>typ math_content\<close>, which provides also a framework for
semi-formal terminology, which we re-use by this definition.\<close> semi-formal "math-alike" terminology, which we re-use by this definition.\<close>
doc_class semi_formal_content = math_content + doc_class semi_formal_content = math_content +
status :: status <= "semiformal" status :: status <= "semiformal"
@ -39,27 +51,29 @@ type_synonym sfc = semi_formal_content
(*>>*) (*>>*)
declare[[ Definition_default_class="semi_formal_content"]]
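(* With Definition_default_class set to "semi_formal_content", the Definition* items below can
   omit the explicit ::sfc annotation (as most of the following entries now do). *)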
text\<open> Excerpt of the BE EN 50128:2011, page 22. \<close> text\<open> Excerpt of the BE EN 50128:2011, page 22. \<close>
section\<open>Terms and Definitions\<close> section\<open>Terms and Definitions\<close>
typ "sfc" Definition*[assessment,short_name="''assessment''"]
Definition*[assessment::sfc,short_name="''assessment''"]
\<open>process of analysis to determine whether software, which may include \<open>process of analysis to determine whether software, which may include
process, documentation, system, subsystem hardware and/or software components, meets the specified process, documentation, system, subsystem hardware and/or software components, meets the specified
requirements and to form a judgement as to whether the software is fit for its intended purpose. requirements and to form a judgement as to whether the software is fit for its intended purpose.
Safety assessment is focused on but not limited to the safety properties of a system.\<close> Safety assessment is focused on but not limited to the safety properties of a system.\<close>
Definition*[assessor::sfc, short_name="''assessor''"] Definition*[assessor, short_name="''assessor''"]
\<open>entity that carries out an assessment\<close> \<open>entity that carries out an assessment\<close>
Definition*[COTS::sfc, short_name="''commercial off-the-shelf software''"] Definition*[COTS, short_name="''commercial off-the-shelf software''"]
\<open>software defined by market-driven need, commercially available and whose fitness for purpose \<open>software defined by market-driven need, commercially available and whose fitness for purpose
has been demonstrated by a broad spectrum of commercial users\<close> has been demonstrated by a broad spectrum of commercial users\<close>
Definition*[component::sfc] Definition*[component]
\<open>a constituent part of software which has well-defined interfaces and behaviour \<open>a constituent part of software which has well-defined interfaces and behaviour
with respect to the software architecture and design and fulfils the following with respect to the software architecture and design and fulfils the following
criteria: criteria:
@ -71,53 +85,53 @@ criteria:
\<close> \<close>
typ "sfc" typ "sfc"
Definition*[CMGR::sfc, short_name="''configuration manager''"] Definition*[CMGR, short_name="''configuration manager''"]
\<open>entity that is responsible for implementing and carrying out the processes \<open>entity that is responsible for implementing and carrying out the processes
for the configuration management of documents, software and related tools including for the configuration management of documents, software and related tools including
\<^emph>\<open>change management\<close>\<close> \<^emph>\<open>change management\<close>\<close>
Definition*[customer::sfc] Definition*[customer]
\<open>entity which purchases a railway control and protection system including the software\<close> \<open>entity which purchases a railway control and protection system including the software\<close>
Definition*[designer::sfc] Definition*[designer]
\<open>entity that analyses and transforms specified requirements into acceptable design solutions \<open>entity that analyses and transforms specified requirements into acceptable design solutions
which have the required safety integrity level\<close> which have the required safety integrity level\<close>
Definition*[entity::sfc] Definition*[entity]
\<open>person, group or organisation who fulfils a role as defined in this European Standard\<close> \<open>person, group or organisation who fulfils a role as defined in this European Standard\<close>
declare_reference*[fault::sfc] declare_reference*[fault]
Definition*[error::sfc] Definition*[error]
\<open>defect, mistake or inaccuracy which could result in failure or in a deviation \<open>defect, mistake or inaccuracy which could result in failure or in a deviation
from the intended performance or behaviour (cf. @{semi_formal_content (unchecked) \<open>fault\<close>}))\<close> from the intended performance or behaviour (cf. @{semi_formal_content (unchecked) \<open>fault\<close>}))\<close>
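(* Note: at this point \<open>fault\<close> is only pre-declared via declare_reference* and is defined
   further below, so the antiquotation above presumably needs the (unchecked) option to allow
   the forward reference. *)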
Definition*[fault::sfc] Definition*[fault]
\<open>defect, mistake or inaccuracy which could result in failure or in a deviation \<open>defect, mistake or inaccuracy which could result in failure or in a deviation
from the intended performance or behaviour (cf @{semi_formal_content \<open>error\<close>})\<close> from the intended performance or behaviour (cf @{semi_formal_content \<open>error\<close>})\<close>
Definition*[failure::sfc] Definition*[failure]
\<open>unacceptable difference between required and observed performance\<close> \<open>unacceptable difference between required and observed performance\<close>
Definition*[FT::sfc, short_name="''fault tolerance''"] Definition*[FT, short_name="''fault tolerance''"]
\<open>built-in capability of a system to provide continued correct provision of service as specified, \<open>built-in capability of a system to provide continued correct provision of service as specified,
in the presence of a limited number of hardware or software faults\<close> in the presence of a limited number of hardware or software faults\<close>
Definition*[firmware::sfc] Definition*[firmware]
\<open>software stored in read-only memory or in semi-permanent storage such as flash memory, in a \<open>software stored in read-only memory or in semi-permanent storage such as flash memory, in a
way that is functionally independent of applicative software\<close> way that is functionally independent of applicative software\<close>
Definition*[GS::sfc,short_name="''generic software''"] Definition*[GS,short_name="''generic software''"]
\<open>software which can be used for a variety of installations purely by the provision of \<open>software which can be used for a variety of installations purely by the provision of
application-specific data and/or algorithms\<close> application-specific data and/or algorithms\<close>
Definition*[implementer::sfc] Definition*[implementer]
\<open>entity that transforms specified designs into their physical realisation\<close> \<open>entity that transforms specified designs into their physical realisation\<close>
Definition*[integration::sfc] Definition*[integration]
\<open>process of assembling software and/or hardware items, according to the architectural and \<open>process of assembling software and/or hardware items, according to the architectural and
design specification, and testing the integrated unit\<close> design specification, and testing the integrated unit\<close>
Definition*[integrator::sfc] Definition*[integrator]
\<open>entity that carries out software integration\<close> \<open>entity that carries out software integration\<close>
Definition*[PES :: sfc, short_name="''pre-existing software''"] Definition*[PES :: sfc, short_name="''pre-existing software''"]
@ -127,52 +141,52 @@ off-the shelf) and open source software\<close>
Definition*[OSS :: sfc, short_name="''open source software''"] Definition*[OSS :: sfc, short_name="''open source software''"]
\<open>source code available to the general public with relaxed or non-existent copyright restrictions\<close> \<open>source code available to the general public with relaxed or non-existent copyright restrictions\<close>
Definition*[PLC::sfc, short_name="''programmable logic controller''"] Definition*[PLC, short_name="''programmable logic controller''"]
\<open>solid-state control system which has a user programmable memory for storage of instructions to \<open>solid-state control system which has a user programmable memory for storage of instructions to
implement specific functions\<close> implement specific functions\<close>
Definition*[PM::sfc, short_name="''project management''"] Definition*[PM, short_name="''project management''"]
\<open>administrative and/or technical conduct of a project, including safety aspects\<close> \<open>administrative and/or technical conduct of a project, including safety aspects\<close>
Definition*[PGMGR::sfc, short_name="''project manager''"] Definition*[PGMGR, short_name="''project manager''"]
\<open>entity that carries out project management\<close> \<open>entity that carries out project management\<close>
Definition*[reliability::sfc] Definition*[reliability]
\<open>ability of an item to perform a required function under given conditions for a given period of time\<close> \<open>ability of an item to perform a required function under given conditions for a given period of time\<close>
Definition*[robustness::sfc] Definition*[robustness]
\<open>ability of an item to detect and handle abnormal situations\<close> \<open>ability of an item to detect and handle abnormal situations\<close>
Definition*[RMGR::sfc, short_name="''requirements manager''"] Definition*[RMGR, short_name="''requirements manager''"]
\<open>entity that carries out requirements management\<close> \<open>entity that carries out requirements management\<close>
Definition*[RMGMT::sfc, short_name="''requirements management''"] Definition*[RMGMT, short_name="''requirements management''"]
\<open>the process of eliciting, documenting, analysing, prioritising and agreeing on requirements and \<open>the process of eliciting, documenting, analysing, prioritising and agreeing on requirements and
then controlling change and communicating to relevant stakeholders. It is a continuous process then controlling change and communicating to relevant stakeholders. It is a continuous process
throughout a project\<close> throughout a project\<close>
Definition*[risk::sfc] Definition*[risk]
\<open>combination of the rate of occurrence of accidents and incidents resulting in harm (caused by \<open>combination of the rate of occurrence of accidents and incidents resulting in harm (caused by
a hazard) and the degree of severity of that harm\<close> a hazard) and the degree of severity of that harm\<close>
Definition*[safety::sfc] Definition*[safety]
\<open>freedom from unacceptable levels of risk of harm to people\<close> \<open>freedom from unacceptable levels of risk of harm to people\<close>
Definition*[SA::sfc, short_name="''safety authority''"] Definition*[SA, short_name="''safety authority''"]
\<open>body responsible for certifying that safety related software or services comply with relevant \<open>body responsible for certifying that safety related software or services comply with relevant
statutory safety requirements\<close> statutory safety requirements\<close>
Definition*[SF::sfc, short_name="''safety function''"] Definition*[SF, short_name="''safety function''"]
\<open>a function that implements a part or whole of a safety requirement\<close> \<open>a function that implements a part or whole of a safety requirement\<close>
Definition*[SFRS::sfc, short_name= "''safety-related software''"] Definition*[SFRS, short_name= "''safety-related software''"]
\<open>software which performs safety functions\<close> \<open>software which performs safety functions\<close>
Definition*[software::sfc] Definition*[software]
\<open>intellectual creation comprising the programs, procedures, rules, data and any associated \<open>intellectual creation comprising the programs, procedures, rules, data and any associated
documentation pertaining to the operation of a system\<close> documentation pertaining to the operation of a system\<close>
Definition*[SB::sfc, short_name="''software baseline''"] Definition*[SB, short_name="''software baseline''"]
\<open>complete and consistent set of source code, executable files, configuration files, \<open>complete and consistent set of source code, executable files, configuration files,
installation scripts and documentation that are needed for a software release. Information about installation scripts and documentation that are needed for a software release. Information about
compilers, operating systems, preexisting software and dependent tools is stored as part of the compilers, operating systems, preexisting software and dependent tools is stored as part of the
@ -183,7 +197,7 @@ released and assessed
Definition*[SWLC::sfc, short_name="''software life-cycle''"] Definition*[SWLC, short_name="''software life-cycle''"]
\<open>those activities occurring during a period of time that starts when \<open>those activities occurring during a period of time that starts when
software is conceived and ends when the software is no longer available for use. The software life software is conceived and ends when the software is no longer available for use. The software life
cycle typically includes a requirements phase, design phase,test phase, integration phase, cycle typically includes a requirements phase, design phase,test phase, integration phase,
@ -191,35 +205,35 @@ deployment phase and a maintenance phase 3.1.35 software maintainability
capability of the software to be modified; to correct faults, improve to a different environment capability of the software to be modified; to correct faults, improve to a different environment
\<close> \<close>
Definition*[SM::sfc, short_name="''software maintenance''"] Definition*[SM, short_name="''software maintenance''"]
\<open> action, or set of actions, carried out on software after deployment functionality \<open> action, or set of actions, carried out on software after deployment functionality
performance or other attributes, or adapt it with the aim of enhancing or correcting its\<close> performance or other attributes, or adapt it with the aim of enhancing or correcting its\<close>
Definition*[SOSIL::sfc, short_name="''software safety integrity level''"] Definition*[SOSIL, short_name="''software safety integrity level''"]
\<open>classification number which determines the techniques and measures that have to be applied to \<open>classification number which determines the techniques and measures that have to be applied to
software NOTE Safety-related software has been classified into five safety integrity levels, where software NOTE Safety-related software has been classified into five safety integrity levels, where
0 is the lowest and 4 the highest.\<close> 0 is the lowest and 4 the highest.\<close>
Definition*[supplier::sfc] Definition*[supplier]
\<open>entity that designs and builds a railway control and protection system including the software \<open>entity that designs and builds a railway control and protection system including the software
or parts thereof\<close> or parts thereof\<close>
Definition*[SYSIL::sfc, short_name="''system safety integrity level''"] Definition*[SYSIL, short_name="''system safety integrity level''"]
\<open>classification number which indicates the required degree of confidence that an integrated \<open>classification number which indicates the required degree of confidence that an integrated
system comprising hardware and software will meet its specified safety requirements\<close> system comprising hardware and software will meet its specified safety requirements\<close>
Definition*[tester::sfc]\<open>an entity that carries out testing\<close> Definition*[tester]\<open>an entity that carries out testing\<close>
Definition*[testing::sfc] Definition*[testing]
\<open>process of executing software under controlled conditions as to ascertain its behaviour and \<open>process of executing software under controlled conditions as to ascertain its behaviour and
performance compared to the corresponding requirements specification\<close> performance compared to the corresponding requirements specification\<close>
Definition*[TCT1::sfc, short_name="''tool class T1''"] Definition*[TCT1, short_name="''tool class T1''"]
\<open>generates no outputs which can directly or indirectly contribute to the executable code \<open>generates no outputs which can directly or indirectly contribute to the executable code
(including data) of the software NOTE 11 examples include: a text editor or a requirement or (including data) of the software NOTE 11 examples include: a text editor or a requirement or
design support tool with no automatic code generation capabilities; configuration control tools.\<close> design support tool with no automatic code generation capabilities; configuration control tools.\<close>
Definition*[TCT2::sfc,short_name="''tool class T2''"] Definition*[TCT2,short_name="''tool class T2''"]
\<open>supports the test or verification of the design or executable code, where errors in the tool \<open>supports the test or verification of the design or executable code, where errors in the tool
can fail to reveal defects but cannot directly create errors in the executable software can fail to reveal defects but cannot directly create errors in the executable software
NOTE T2 examples include: a test harness generator; a test coverage measurement tool; a static NOTE T2 examples include: a test harness generator; a test coverage measurement tool; a static
@ -227,35 +241,35 @@ analysis tool. reproduce defined versions and be the input for future releases a
at upgrade in the maintenance phase at upgrade in the maintenance phase
\<close> \<close>
Definition*[TCT3::sfc, short_name="''tool class T3''"] Definition*[TCT3, short_name="''tool class T3''"]
\<open>generates outputs which can directly or indirectly contribute to the executable code \<open>generates outputs which can directly or indirectly contribute to the executable code
(including data) of the safety related system NOTE T3 examples include: a source code compiler, (including data) of the safety related system NOTE T3 examples include: a source code compiler,
a data/algorithms compiler, a tool to change set-points during system operation; an optimising a data/algorithms compiler, a tool to change set-points during system operation; an optimising
compiler where the relationship between the source code program and the generated object code is compiler where the relationship between the source code program and the generated object code is
not obvious; a compiler that incorporates an executable run-time package into the executable code. not obvious; a compiler that incorporates an executable run-time package into the executable code.
\<close> \<close>
Definition*[traceability::sfc, short_name="''traceability''"] Definition*[traceability, short_name="''traceability''"]
\<open>degree to which relationship can be established between two or more products of a development \<open>degree to which relationship can be established between two or more products of a development
process, especially those having a predecessor/successor or master/subordinate relationship to one process, especially those having a predecessor/successor or master/subordinate relationship to one
another\<close> another\<close>
Definition*[validation::sfc, short_name="''validation''"] Definition*[validation, short_name="''validation''"]
\<open>process of analysis followed by a judgment based on evidence to \<open>process of analysis followed by a judgment based on evidence to
determine whether an item (e.g. process, documentation, software or application) fits the user determine whether an item (e.g. process, documentation, software or application) fits the user
needs, in particular with respect to safety and quality, with emphasis on the suitability of its needs, in particular with respect to safety and quality, with emphasis on the suitability of its
operation in accordance to its purpose in its intended environment\<close> operation in accordance to its purpose in its intended environment\<close>
Definition*[validator::sfc, short_name="''validator''"] Definition*[validator, short_name="''validator''"]
\<open>entity that is responsible for the validation\<close> \<open>entity that is responsible for the validation\<close>
Definition*[verification::sfc, short_name="''verification''"] Definition*[verification, short_name="''verification''"]
\<open>process of examination followed by a judgment based on evidence that output items (process, \<open>process of examination followed by a judgment based on evidence that output items (process,
documentation, software or application) of a specific development phase fulfil the requirements of documentation, software or application) of a specific development phase fulfil the requirements of
that phase with respect to completeness, correctness and consistency. that phase with respect to completeness, correctness and consistency.
NOTE Verification is mostly based on document reviews (design, implementation, test documents etc.). NOTE Verification is mostly based on document reviews (design, implementation, test documents etc.).
\<close> \<close>
Definition*[verifier::sfc, short_name="''verifier''"] Definition*[verifier, short_name="''verifier''"]
\<open>entity that is responsible for one or more verification activities\<close> \<open>entity that is responsible for one or more verification activities\<close>

View File

@@ -49,7 +49,7 @@ doc_class E = D +
doc_class F = doc_class F =
properties :: "term list" properties :: "term list"
r :: "thm list" r :: "thm list"
u :: "file" u :: "file"
@@ -62,7 +62,7 @@ doc_class F =
doc_class G = C + doc_class G = C +
g :: "thm" <= "@{thm ''HOL.refl''}" g :: "thm" <= "@{thm \<open>HOL.refl\<close>}"
doc_class M = doc_class M =
trace :: "(A + C + D + F) list" trace :: "(A + C + D + F) list"
@@ -79,8 +79,8 @@ ML\<open> Thy_Header.get_keywords @{theory};(* this looks to be really theory gl
section*[test::A]\<open>Test and Validation\<close> section*[test::A]\<open>Test and Validation\<close>
text\<open>Defining some document elements to be referenced later on in another theory: \<close> text\<open>Defining some document elements to be referenced later on in another theory: \<close>
text*[sdf]\<open> Lorem ipsum @{thm refl}\<close> text*[sdf]\<open> Lorem ipsum @{thm refl}\<close>
text*[ sdfg] \<open> Lorem ipsum @{thm refl}\<close> text*[ sdfg :: F] \<open> Lorem ipsum @{thm refl}\<close>
text*[ xxxy ] \<open> Lorem ipsum @{docitem \<open>sdfg\<close>} rate @{thm refl}\<close> text*[ xxxy ] \<open> Lorem ipsum @{F \<open>sdfg\<close>} rate @{thm refl}\<close>
end end

View File

@@ -11,12 +11,13 @@
* SPDX-License-Identifier: BSD-2-Clause * SPDX-License-Identifier: BSD-2-Clause
*************************************************************************) *************************************************************************)
section\<open>An example ontology for a scholarly paper\<close> section\<open>An example ontology for scientific, MINT-oriented papers.\<close>
theory scholarly_paper theory scholarly_paper
imports "../../DOF/Isa_COL" imports "../../DOF/Isa_COL"
keywords "author*" "abstract*" keywords "author*" "abstract*"
"Definition*" "Lemma*" "Theorem*" :: document_body "Definition*" "Lemma*" "Theorem*" :: document_body
and "assert*" :: thy_decl
begin begin
@@ -123,7 +124,7 @@ A formal statement can, but must not have a reference to true formal Isabelle/Is
subsection\<open>Technical Content and its Formats\<close> subsection\<open>Technical Content and its Formats\<close>
datatype status = semiformal | description datatype status = formal | semiformal | description
text\<open>The class \<^verbatim>\<open>technical\<close> regroups a number of text-elements that contain typical text\<open>The class \<^verbatim>\<open>technical\<close> regroups a number of text-elements that contain typical
"technical content" in mathematical or engineering papers: definitions, theorems, lemmas, examples. \<close> "technical content" in mathematical or engineering papers: definitions, theorems, lemmas, examples. \<close>
@@ -154,7 +155,7 @@ doc_class example = text_section +
short_name :: string <= "''''" short_name :: string <= "''''"
subsection\<open>Mathematical Content\<close> subsection\<open>Freeform Mathematical Content\<close>
text\<open>We follow in our enumeration of referentiable mathematical content classes the AMS style and its text\<open>We follow in our enumeration of referentiable mathematical content classes the AMS style and its
provided \<^emph>\<open>theorem environments\<close> (see \<^verbatim>\<open>texdoc amslatex\<close>). We add, however, the concepts provided \<^emph>\<open>theorem environments\<close> (see \<^verbatim>\<open>texdoc amslatex\<close>). We add, however, the concepts
@@ -189,8 +190,21 @@ doc_class math_content = tc +
invariant s2 :: "\<lambda> \<sigma>::math_content. status \<sigma> = semiformal" invariant s2 :: "\<lambda> \<sigma>::math_content. status \<sigma> = semiformal"
type_synonym math_tc = math_content type_synonym math_tc = math_content
text\<open>The class \<^typ>\<open>math_content\<close> is perhaps more adequately described as "math-alike content".
Sub-classes can encompass instances such as:
\<^item> terminological definitions such as:
\<open>Definition*[assessor::sfc, short_name="''assessor''"]\<open>entity that carries out an assessment\<close>\<close>
\<^item> free-form mathematical definitions such as:
\<open>Definition*[process_ordering, short_name="''process ordering''"]\<open>
We define \<open>P \<sqsubseteq> Q \<equiv> \<psi>\<^sub>\<D> \<and> \<psi>\<^sub>\<R> \<and> \<psi>\<^sub>\<M> \<close>, where \<^vs>\<open>-0.2cm\<close>
1) \<^vs>\<open>-0.2cm\<close> \<open>\<psi>\<^sub>\<D> = \<D> P \<supseteq> \<D> Q \<close>
2) ...
\<close>\<close>
\<^item> semi-formal descriptions, which are free-form mathematical definitions to which, finally,
an attribute with a formal Isabelle definition is attached.
\<close>
find_theorems name:"s1" name:"scholarly"
(* type qualification is a work around *) (* type qualification is a work around *)
@@ -244,42 +258,139 @@ doc_class "math_example" = math_content +
mcc :: "math_content_class" <= "expl" mcc :: "math_content_class" <= "expl"
invariant d5 :: "\<lambda> \<sigma>::math_example. mcc \<sigma> = expl" invariant d5 :: "\<lambda> \<sigma>::math_example. mcc \<sigma> = expl"
subsection\<open>Ontological Macros\<close>
subsubsection\<open>Ontological Macros \<^verbatim>\<open>Definition*\<close>, \<^verbatim>\<open>Lemma*\<close>, \<^verbatim>\<open>Theorem*\<close> ... \<close>
text\<open>These ontological macros define notations for the class
\<^typ>\<open>math_content\<close> in order to allow for a variety of free-form formats;
for specific sub-classes, default options can be set
to support more succinct notations and to avoid constructs
such as:
\<^theory_text>\<open>Definition*[l::"definition"]\<open>...\<close>\<close>.
Instead, the more convenient global declaration
\<^theory_text>\<open>declare[[Definition_default_class="definition"]]\<close>
supports subsequent abbreviations:
\<^theory_text>\<open>Definition*[l]\<open>...\<close>\<close>.
\<close>
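(* A minimal usage sketch of the abbreviated notation explained above. The label "ex_def" and
   the definition text are hypothetical; the sketch assumes the configuration options set up
   directly below:

     declare[[Definition_default_class = "definition"]]
     Definition*[ex_def]\<open>A relation R is transitive iff R x y and R y z imply R x z.\<close>
*)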
ML\<open>
val (Definition_default_class, Definition_default_class_setup)
= Attrib.config_string \<^binding>\<open>Definition_default_class\<close> (K "");
val (Lemma_default_class, Lemma_default_class_setup)
= Attrib.config_string \<^binding>\<open>Lemma_default_class\<close> (K "");
val (Theorem_default_class, Theorem_default_class_setup)
= Attrib.config_string \<^binding>\<open>Theorem_default_class\<close> (K "");
\<close>
setup\<open>Definition_default_class_setup\<close>
setup\<open>Lemma_default_class_setup\<close>
setup\<open>Theorem_default_class_setup\<close>
ML\<open> local open ODL_Command_Parser in ML\<open> local open ODL_Command_Parser in
(* *********************************************************************** *)
(* Ontological Macro Command Support *)
(* *********************************************************************** *)
(* {markdown = true} sets the parsing process such that in the text-core markdown elements are (* {markdown = true} sets the parsing process such that in the text-core
accepted. *) markdown elements are accepted. *)
val _ =
Outer_Syntax.command ("Definition*", @{here}) "Textual Definition" val _ = let fun use_Definition_default thy =
(attributes -- Parse.opt_target -- Parse.document_source --| semi let val ddc = Config.get_global thy Definition_default_class
>> (Toplevel.theory o (Onto_Macros.enriched_formal_statement_command in (SOME(((ddc = "") ? (K "math_content")) ddc)) end
(SOME "math_content") (* should be (SOME "definition") *) in Outer_Syntax.command ("Definition*", @{here}) "Textual Definition"
[("mcc","defn")] (attributes -- Parse.opt_target -- Parse.document_source --| semi
{markdown = true} ))); >> (Toplevel.theory o (fn args => fn thy =>
Onto_Macros.enriched_formal_statement_command
(use_Definition_default thy)
[("mcc","defn")]
{markdown = true} args thy)))
end;
val _ = val _ = let fun use_Lemma_default thy =
Outer_Syntax.command ("Lemma*", @{here}) "Textual Lemma Outline" let val ddc = Config.get_global thy Definition_default_class
(attributes -- Parse.opt_target -- Parse.document_source --| semi in (SOME(((ddc = "") ? (K "math_content")) ddc)) end
>> (Toplevel.theory o (Onto_Macros.enriched_formal_statement_command in Outer_Syntax.command ("Lemma*", @{here}) "Textual Lemma Outline"
(SOME "lemma") (attributes -- Parse.opt_target -- Parse.document_source --| semi
[("mcc","lem")] >> (Toplevel.theory o (fn args => fn thy =>
{markdown = true} ))); Onto_Macros.enriched_formal_statement_command
(use_Lemma_default thy)
[("mcc","lem")]
{markdown = true} args thy)))
end;
val _ = val _ = let fun use_Theorem_default thy =
Outer_Syntax.command ("Theorem*", @{here}) "Textual Theorem Outline" let val ddc = Config.get_global thy Definition_default_class
(attributes -- Parse.opt_target -- Parse.document_source --| semi in (SOME(((ddc = "") ? (K "math_content")) ddc)) end
>> (Toplevel.theory o (Onto_Macros.enriched_formal_statement_command in Outer_Syntax.command ("Theorem*", @{here}) "Textual Theorem Outline"
(SOME "theorem") (attributes -- Parse.opt_target -- Parse.document_source --| semi
[("mcc","thm")] >> (Toplevel.theory o (fn args => fn thy =>
{markdown = true} ))); Onto_Macros.enriched_formal_statement_command
(use_Theorem_default thy)
[("mcc","thm")]
{markdown = true} args thy)))
end;
end end
\<close> \<close>
subsection\<open>Formal Mathematical Content\<close>
text\<open>While this library is intended to give ample space to freeform text elements in
order to counterbalance Isabelle's standard view, it should not be forgotten that the real strength
of Isabelle is its ability to handle both, and to establish links between the two worlds. Therefore:\<close>
doc_class math_formal = math_content +
referentiable :: bool <= False
status :: status <= "formal"
properties :: "term list"
type_synonym math_fc = math_formal
doc_class assertion = math_formal +
referentiable :: bool <= True (* No support in Backend yet. *)
status :: status <= "formal"
properties :: "term list"
ML\<open>
(* TODO: Rework this code and make it closer to Definition*. There is still
a remnant of "abstract classes" in it: any class possessing a properties attribute
is admissible to this command, not just ... *)
local open ODL_Command_Parser in
fun assertion_cmd'((((((oid,pos),cid_pos),doc_attrs),name_opt:string option),modes : string list),
prop) =
let fun conv_2_holstring thy = (bstring_to_holstring (Proof_Context.init_global thy))
fun conv_attrs thy = (("properties",pos),"[@{termrepr ''"^conv_2_holstring thy prop ^" ''}]")
::doc_attrs
fun conv_attrs' thy = map (fn ((lhs,pos),rhs) => (((lhs,pos),"+="),rhs)) (conv_attrs thy)
fun mks thy = case DOF_core.get_object_global_opt oid thy of
SOME NONE => (error("update of declared but not created doc_item:" ^ oid))
| SOME _ => (update_instance_command (((oid,pos),cid_pos),conv_attrs' thy) thy)
| NONE => (create_and_check_docitem
{is_monitor = false} {is_inline = false}
oid pos cid_pos (conv_attrs thy) thy)
val check = (assert_cmd name_opt modes prop) o Proof_Context.init_global
in
(* Toplevel.keep (check o Toplevel.context_of) *)
Toplevel.theory (fn thy => (check thy; mks thy))
end
val attributes = attributes (* re-export *)
end
val _ =
Outer_Syntax.command @{command_keyword "assert*"}
"evaluate and print term"
(attributes -- opt_evaluator -- opt_modes -- Parse.term >> assertion_cmd');
\<close>
subsubsection*[ex_ass::example]\<open>Example\<close>
text\<open>Assertions allow for logical statements to be checked in the global context. \<close>
assert*[ass1::assertion, short_name = "\<open>This is an assertion\<close>"] \<open>(3::int) < 4\<close>
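(* A further, purely illustrative sketch (hypothetical name "ass_list"): assert* also accepts
   a plain executable statement without additional attributes, e.g.

     assert*[ass_list::assertion] \<open>length [1::int, 2, 3] = 3\<close>
*)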
subsection\<open>Example Statements\<close> subsection\<open>Example Statements\<close>
text\<open> \<^verbatim>\<open>examples\<close> are currently considered \<^verbatim>\<open>technical\<close>. Is a main category to be refined text\<open> \<^verbatim>\<open>examples\<close> are currently considered \<^verbatim>\<open>technical\<close>. Is a main category to be refined
@@ -293,7 +404,8 @@ doc_class tech_example = technical +
subsection\<open>Content in Engineering/Tech Papers \<close> subsection\<open>Content in Engineering/Tech Papers \<close>
text\<open>This section is currently experimental and not supported by the documentation
generation backend.\<close>
doc_class engineering_content = tc + doc_class engineering_content = tc +
short_name :: string <= "''''" short_name :: string <= "''''"
@@ -415,52 +527,60 @@ setup\<open> let val cidS = ["scholarly_paper.introduction","scholarly_paper.tec
true) true)
in DOF_core.update_class_invariant "scholarly_paper.article" body end\<close> in DOF_core.update_class_invariant "scholarly_paper.article" body end\<close>
ML\<open> \<close>
(* some test code *)
ML\<open>
(*
val trace = AttributeAccess.compute_trace_ML (Context.Proof @{context}) "this" @{here} @{here}
val groups = partition ( @{context}) cidS trace
val _::_::_::_:: _ ::_ ::_ ::a::_ = groups;
check;
fun get_level_raw oid = AttributeAccess.compute_attr_access (Context.Proof @{context}) "level" oid @{here} @{here};
fun get_level oid = dest_option (snd o HOLogic.dest_number) (get_level_raw (oid));
fun check_level_hd a = case (get_level (snd a)) of
NONE => error("Invariant violation: leading section" ^ snd a ^
" must have lowest level")
| SOME X => X
fun check_group_elem level_hd a = case (get_level (snd a)) of
NONE => true
| SOME y => if y > level_hd then true
else error("Invariant violation: subsequent section " ^ snd a ^
" must have higher level.");
fun check_group a = map (check_group_elem (check_level_hd (hd a))) (tl a) ;
*)
\<close>
section\<open>Miscellaneous\<close> section\<open>Miscellaneous\<close>
ML\<open>
Parse.int
\<close>
subsection\<open>Layout Trimming Commands\<close>
setup\<open> DOF_lib.define_macro \<^binding>\<open>hs\<close> "\\hspace{" "}" (K(K())) \<close>
setup\<open> DOF_lib.define_macro \<^binding>\<open>vs\<close> "\\vspace{" "}" (K(K())) \<close>
setup\<open> DOF_lib.define_shortcut \<^binding>\<open>clearpage\<close> "\\clearpage{}" \<close>
subsection\<open>Common Abbreviations\<close> subsection\<open>Common Abbreviations\<close>
setup \<open> DOF_lib.define_shortcut \<^binding>\<open>eg\<close> "\\eg"
(* Latin: „exempli gratia“ meaning „for example“. *)
#> DOF_lib.define_shortcut \<^binding>\<open>ie\<close> "\\ie"
(* Latin: „id est“ meaning „that is to say“. *)
#> DOF_lib.define_shortcut \<^binding>\<open>etc\<close> "\\etc"\<close>
(* This is an alternative style for macro definitions, equivalent to a chain of setup ... setup ... declarations. *)
define_shortcut* eg \<rightleftharpoons> \<open>\eg\<close> (* Latin: „exempli gratia“ meaning „for example“. *)
ie \<rightleftharpoons> \<open>\ie\<close> (* Latin: „id est“ meaning „that is to say“. *)
etc \<rightleftharpoons> \<open>\etc\<close> (* Latin: „et cetera“ meaning „and the rest“. *)
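(* Usage sketch: once declared, the shortcuts are available as control antiquotations inside
   text elements (the sentence below is hypothetical):

     text\<open>Ontologies constrain text elements, \<^eg> sections; \<^ie>, they restrict admissible
          attribute values, class instances, \<^etc>\<close>
*)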
subsection\<open>Layout Trimming Commands (with syntactic checks)\<close>
ML\<open>
local
val scan_cm = Scan.ahead (Basic_Symbol_Pos.$$$ "c" |-- Basic_Symbol_Pos.$$$ "m" ) ;
val scan_pt = Scan.ahead (Basic_Symbol_Pos.$$$ "p" |-- Basic_Symbol_Pos.$$$ "t" ) ;
val scan_blank = Scan.repeat ( Basic_Symbol_Pos.$$$ " "
|| Basic_Symbol_Pos.$$$ "\t"
|| Basic_Symbol_Pos.$$$ "\n");
val scan_latex_measure = (scan_blank
|-- Scan.option (Basic_Symbol_Pos.$$$ "-")
|-- Symbol_Pos.scan_nat
|-- (Scan.option ((Basic_Symbol_Pos.$$$ ".") |-- Symbol_Pos.scan_nat))
|-- scan_blank
|-- (scan_cm || scan_pt)
|-- scan_blank
);
in
fun check_latex_measure _ src =
let val _ = ((Scan.catch scan_latex_measure (Symbol_Pos.explode(Input.source_content src)))
handle Fail _ => error ("syntax error in LaTeX measure") )
in () end
end\<close>
setup\<open> DOF_lib.define_macro \<^binding>\<open>vs\<close> "\\vspace{" "}" (check_latex_measure) \<close>
setup\<open> DOF_lib.define_macro \<^binding>\<open>hs\<close> "\\hspace{" "}" (check_latex_measure) \<close>
(*<*)
text\<open>Tests: \<^vs>\<open>-0.14cm\<close>\<close>
ML\<open> check_latex_measure @{context} (Input.string "-3.14 cm") \<close>
define_macro* vs2 \<rightleftharpoons> \<open>\vspace{\<close> _ \<open>}\<close> (check_latex_measure) (* checkers not yet implemented on the Isar level *)
define_macro* hs2 \<rightleftharpoons> \<open>\hspace{\<close> _ \<open>}\<close> (* works fine without checker.*)
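(* A hedged test for the Isar-level variants, analogous to the \<^vs> test above
   (hypothetical measures; vs2 additionally exercises the syntactic checker):
   text\<open>Tests: \<^hs2>\<open>0.3cm\<close> \<^vs2>\<open>-0.1cm\<close>\<close>
*)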
(*>*)
define_shortcut* clearpage \<rightleftharpoons> \<open>\clearpage{}\<close>
hf \<rightleftharpoons> \<open>\hfill\<close>
br \<rightleftharpoons> \<open>\break\<close>
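(* Usage sketch for the layout shortcuts just defined (hypothetical text):
   text\<open>left-hand part \<^hf> right-aligned part \<^br> forced line break, then \<^clearpage>\<close>
*)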
end end

View File

@@ -35,7 +35,7 @@ doc_class index =
section\<open>Code Statement Elements\<close> section\<open>Code Statement Elements\<close>
doc_class "code" = technical + doc_class "code" = technical +
checked :: bool <= "False" checked :: bool <= "False"
caption :: "string" <= "''''" caption :: "string" <= "''''"
@@ -79,251 +79,3 @@ doc_class report =
end end
(*
=====================================
docclass: Isa_COL.thms
name: "thms"
origin: Isa_COL
attrs: "properties"
invs:
docclass: Isa_COL.figure
name: "figure"
origin: Isa_COL
attrs: "relative_width", "src", "placement", "spawn_columns"(True)
invs:
docclass: Isa_COL.chapter = Isa_COL.text_element +
name: "chapter"
origin: Isa_COL
attrs: "level"(Some 0)
invs:
docclass: Isa_COL.concept
name: "concept"
origin: Isa_COL
attrs: "tag"([]), "properties"([])
invs:
docclass: Isa_COL.section = Isa_COL.text_element +
name: "section"
origin: Isa_COL
attrs: "level"(Some 1)
invs:
docclass: Isa_COL.assertions
name: "assertions"
origin: Isa_COL
attrs: "properties"
invs:
docclass: Isa_COL.subsection = Isa_COL.text_element +
name: "subsection"
origin: Isa_COL
attrs: "level"(Some 2)
invs:
docclass: Isa_COL.definitions
name: "definitions"
origin: Isa_COL
attrs: "requires", "establishes"
invs:
docclass: Isa_COL.formal_item
name: "formal_item"
origin: Isa_COL
attrs: "item"
invs:
docclass: Isa_COL.figure_group
name: "figure_group"
origin: Isa_COL
attrs: "trace"([]), "caption"
invs:
docclass: Isa_COL.text_element
name: "text_element"
origin: Isa_COL
attrs: "level"(None), "referentiable"(False), "variants"({STR ''outline'', STR ''document''})
invs:
docclass: scholarly_paper.data = scholarly_paper.engineering_content +
name: "data"
origin: scholarly_paper
attrs: "tag"([])
invs:
docclass: technical_report.SML = technical_report.code +
name: "SML"
origin: technical_report
attrs: "checked"(False)
invs:
docclass: Isa_COL.subsubsection = Isa_COL.text_element +
name: "subsubsection"
origin: Isa_COL
attrs: "level"(Some 3)
invs:
docclass: scholarly_paper.annex = scholarly_paper.text_section +
name: "annex"
origin: scholarly_paper
attrs: "main_author"(None)
invs:
docclass: scholarly_paper.lemma = scholarly_paper.math_content +
name: "lemma"
origin: scholarly_paper
attrs: "referentiable"(True), "mcc"(lem)
invs: d3::\<lambda>\<sigma>. lemma.mcc \<sigma> = lem
docclass: scholarly_paper.title
name: "title"
origin: scholarly_paper
attrs: "short_title"(None)
invs:
docclass: technical_report.ISAR = technical_report.code +
name: "ISAR"
origin: technical_report
attrs: "checked"(False)
invs:
docclass: technical_report.code = scholarly_paper.technical +
name: "code"
origin: technical_report
attrs: "checked"(False), "label"([])
invs:
docclass: Isa_COL.formal_content
name: "formal_content"
origin: Isa_COL
attrs: "trace"([]), "style"
invs:
docclass: scholarly_paper.author
name: "author"
origin: scholarly_paper
attrs: "email"([]), "http_site"([]), "orcid"([]), "affiliation"
invs:
docclass: technical_report.LATEX = technical_report.code +
name: "LATEX"
origin: technical_report
attrs: "checked"(False)
invs:
docclass: technical_report.index
name: "index"
origin: technical_report
attrs: "kind", "level"
invs:
docclass: scholarly_paper.article
name: "article"
origin: scholarly_paper
attrs: "trace"([]), "style_id"(''LNCS''), "version"((0, 0, 0))
invs:
docclass: scholarly_paper.example = scholarly_paper.text_section +
name: "example"
origin: scholarly_paper
attrs: "referentiable"(True), "status"(description), "short_name"([])
invs:
docclass: scholarly_paper.theorem = scholarly_paper.math_content +
name: "theorem"
origin: scholarly_paper
attrs: "referentiable"(True), "mcc"(thm)
invs: d2::\<lambda>\<sigma>. theorem.mcc \<sigma> = thm
docclass: scholarly_paper.abstract
name: "abstract"
origin: scholarly_paper
attrs: "keywordlist"([]), "principal_theorems"
invs:
docclass: scholarly_paper.subtitle
name: "subtitle"
origin: scholarly_paper
attrs: "abbrev"(None)
invs:
docclass: scholarly_paper.corollary = scholarly_paper.math_content +
name: "corollary"
origin: scholarly_paper
attrs: "referentiable"(True), "mcc"(cor)
invs: d4::\<lambda>\<sigma>. corollary.mcc \<sigma> = thm
docclass: scholarly_paper.technical = scholarly_paper.text_section +
name: "technical"
origin: scholarly_paper
attrs: "definition_list"([]), "status"(description), "formal_results"
invs: L1::\<lambda>\<sigma>. 0 < the (text_section.level \<sigma>)
docclass: scholarly_paper.conclusion = scholarly_paper.text_section +
name: "conclusion"
origin: scholarly_paper
attrs: "main_author"(None)
invs:
docclass: scholarly_paper.definition = scholarly_paper.math_content +
name: "definition"
origin: scholarly_paper
attrs: "referentiable"(True), "mcc"(defn)
invs: d1::\<lambda>\<sigma>. definition.mcc \<sigma> = defn
docclass: scholarly_paper.evaluation = scholarly_paper.engineering_content +
name: "evaluation"
origin: scholarly_paper
attrs: "tag"([])
invs:
docclass: scholarly_paper.experiment = scholarly_paper.engineering_content +
name: "experiment"
origin: scholarly_paper
attrs: "tag"([])
invs:
docclass: Isa_COL.side_by_side_figure = Isa_COL.figure +
name: "side_by_side_figure"
origin: Isa_COL
attrs: "anchor", "caption", "relative_width2", "src2", "anchor2", "caption2"
invs:
docclass: scholarly_paper.bibliography = scholarly_paper.text_section +
name: "bibliography"
origin: scholarly_paper
attrs: "style"(Some ''LNCS'')
invs:
docclass: scholarly_paper.introduction = scholarly_paper.text_section +
name: "introduction"
origin: scholarly_paper
attrs: "comment", "claims"
invs:
docclass: scholarly_paper.math_content = scholarly_paper.technical +
name: "math_content"
origin: scholarly_paper
attrs: "referentiable"(True), "short_name"([]), "status"(semiformal), "mcc"(thm)
invs: s1::\<lambda>\<sigma>. \<not> math_content.referentiable \<sigma> \<longrightarrow>
math_content.short_name \<sigma> = [], s2::\<lambda>\<sigma>. math_content.status \<sigma> = semiformal
docclass: scholarly_paper.math_example = scholarly_paper.math_content +
name: "math_example"
origin: scholarly_paper
attrs: "referentiable"(True), "mcc"(expl)
invs: d5::\<lambda>\<sigma>. math_example.mcc \<sigma> = expl
docclass: scholarly_paper.related_work = scholarly_paper.conclusion +
name: "related_work"
origin: scholarly_paper
attrs: "main_author"(None)
invs:
docclass: scholarly_paper.tech_example = scholarly_paper.technical +
name: "tech_example"
origin: scholarly_paper
attrs: "referentiable"(True), "tag"([])
invs:
docclass: scholarly_paper.text_section = Isa_COL.text_element +
name: "text_section"
origin: scholarly_paper
attrs: "main_author"(None), "fixme_list"([]), "level"(None)
invs:
docclass: technical_report.front_matter
name: "front_matter"
origin: technical_report
attrs: "front_matter_style"
invs:
docclass: scholarly_paper.math_motivation = scholarly_paper.technical +
name: "math_motivation"
origin: scholarly_paper
attrs: "referentiable"(False)
invs:
docclass: scholarly_paper.math_semiformal = scholarly_paper.math_content +
name: "math_semiformal"
origin: scholarly_paper
attrs: "referentiable"(True)
invs:
docclass: scholarly_paper.math_explanation = scholarly_paper.technical +
name: "math_explanation"
origin: scholarly_paper
attrs: "referentiable"(False)
invs:
docclass: technical_report.table_of_contents
name: "table_of_contents"
origin: technical_report
attrs: "bookmark_depth"(3), "depth"(3)
invs:
docclass: scholarly_paper.engineering_content = scholarly_paper.technical +
name: "engineering_content"
origin: scholarly_paper
attrs: "short_name"([]), "status"
invs:
=====================================
*)

View File

@@ -16,6 +16,7 @@ theory
imports imports
"Isabelle_DOF.Conceptual" "Isabelle_DOF.Conceptual"
"Isabelle_DOF.math_paper" "Isabelle_DOF.math_paper"
"Isabelle_DOF.scholarly_paper" (* for assert notation *)
begin begin
section\<open>Elementary Creation of Doc-items and Access of their Attributes\<close> section\<open>Elementary Creation of Doc-items and Access of their Attributes\<close>

View File

@@ -4,5 +4,5 @@ session "Isabelle_DOF-tests" = "Isabelle_DOF" +
"AssnsLemmaThmEtc" "AssnsLemmaThmEtc"
"Concept_ExampleInvariant" "Concept_ExampleInvariant"
"Concept_Example" "Concept_Example"
"InnerSyntaxAntiquotations" "TermAntiquotations"
"Attributes" "Attributes"

View File

@@ -18,7 +18,7 @@ For historical reasons, \<^emph>\<open>term antiquotations\<close> are called th
"Inner Syntax Antiquotations". \<close> "Inner Syntax Antiquotations". \<close>
theory theory
InnerSyntaxAntiquotations TermAntiquotations
imports imports
"Isabelle_DOF.Conceptual" "Isabelle_DOF.Conceptual"
begin begin
@@ -50,7 +50,7 @@ text\<open>Some sample lemma:\<close>
lemma murks : "Example=Example" by simp lemma murks : "Example=Example" by simp
text\<open>Example for a meta-attribute of ODL-type @{typ "file"} with an appropriate ISA for the text\<open>Example for a meta-attribute of ODL-type @{typ "file"} with an appropriate ISA for the
file @{file "InnerSyntaxAntiquotations.thy"}\<close> file @{file "TermAntiquotations.thy"}\<close>
(* not working: (* not working:
text*[xcv::F, u="@{file ''InnerSyntaxAntiquotations.thy''}"]\<open>Lorem ipsum ...\<close> text*[xcv::F, u="@{file ''InnerSyntaxAntiquotations.thy''}"]\<open>Lorem ipsum ...\<close>
*) *)
@@ -65,7 +65,7 @@ text*[xcv2::C, g="@{thm ''HOL.refl''}"]\<open>Lorem ipsum ...\<close>
text\<open>Major sample: test-item of doc-class \<open>F\<close> with a relational link between class instances, text\<open>Major sample: test-item of doc-class \<open>F\<close> with a relational link between class instances,
and links to formal Isabelle items like \<open>typ\<close>, \<open>term\<close> and \<open>thm\<close>. \<close> and links to formal Isabelle items like \<open>typ\<close>, \<open>term\<close> and \<open>thm\<close>. \<close>
text*[xcv4::F, r="[@{thm ''HOL.refl''}, text*[xcv4::F, r="[@{thm ''HOL.refl''},
@{thm \<open>InnerSyntaxAntiquotations.murks\<close>}]", (* long names required *) @{thm \<open>TermAntiquotations.murks\<close>}]", (* long names required *)
b="{(@{docitem ''xcv1''},@{docitem \<open>xcv2\<close>})}", (* notations \<open>...\<close> vs. ''...'' *) b="{(@{docitem ''xcv1''},@{docitem \<open>xcv2\<close>})}", (* notations \<open>...\<close> vs. ''...'' *)
s="[@{typ \<open>int list\<close>}]", s="[@{typ \<open>int list\<close>}]",
properties = "[@{term \<open>H \<longrightarrow> H\<close>}]" (* notation \<open>...\<close> required for UTF8*) properties = "[@{term \<open>H \<longrightarrow> H\<close>}]" (* notation \<open>...\<close> required for UTF8*)