
Merge.

master
Achim D. Brucker 2 weeks ago
parent commit 6927781d26
18 changed files with 1321 additions and 1069 deletions
  1. examples/CENELEC_50128/mini_odo/mini_odo.thy (+2 -2)
  2. examples/scholarly_paper/2018-cicm-isabelle_dof-applications/IsaDofApplications.thy (+6 -9)
  3. examples/scholarly_paper/2020-iFM-CSP/paper.thy (+47 -50)
  4. examples/technical_report/Isabelle_DOF-Manual/00_Frontmatter.thy (+17 -16)
  5. examples/technical_report/Isabelle_DOF-Manual/03_GuidedTour.thy (+2 -0)
  6. examples/technical_report/Isabelle_DOF-Manual/04_RefMan.thy (+617 -244)
  7. examples/technical_report/Isabelle_DOF-Manual/05_Implementation.thy (+1 -1)
  8. examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy (+28 -27)
  9. src/DOF/Isa_COL.thy (+15 -193)
  10. src/DOF/Isa_DOF.thy (+222 -53)
  11. src/ontologies/CC_v3.1_R5/CC_terminology.thy (+101 -98)
  12. src/ontologies/CENELEC_50128/CENELEC_50128.thy (+63 -49)
  13. src/ontologies/Conceptual/Conceptual.thy (+4 -4)
  14. src/ontologies/scholarly_paper/scholarly_paper.thy (+190 -70)
  15. src/ontologies/technical_report/technical_report.thy (+1 -249)
  16. src/tests/AssnsLemmaThmEtc.thy (+1 -0)
  17. src/tests/ROOT (+1 -1)
  18. src/tests/TermAntiquotations.thy (+3 -3)

examples/CENELEC_50128/mini_odo/mini_odo.thy (+2 -2)

@@ -534,14 +534,14 @@ text\<open>

text\<open>Examples for declaration of typed doc-items "assumption" and "hypothesis",
concepts defined in the underlying ontology @{theory "Isabelle_DOF.CENELEC_50128"}. \<close>
text*[ass1::assumption, long_name="Some ''assumption one''"] \<open> The subsystem Y is safe. \<close>
text*[ass2::assumption, long_name="Some ''assumption one''"] \<open> The subsystem Y is safe. \<close>
text*[hyp1::hypothesis] \<open> P not equal NP \<close>
text\<open>A real example fragment from a larger project, declaring a text-element as a
"safety-related application condition", a concept defined in the
@{theory "Isabelle_DOF.CENELEC_50128"} ontology:\<close>

text*[hyp2::hypothesis]\<open>Under the assumption @{assumption \<open>ass1\<close>} we establish the following: ... \<close>
text*[hyp2::hypothesis]\<open>Under the assumption @{assumption \<open>ass2\<close>} we establish the following: ... \<close>

text*[ass122::SRAC, long_name="Some ''ass122''"] \<open> The overall sampling frequency of the odometer
subsystem is therefore 14 kHz, which includes sampling, computing and


examples/scholarly_paper/2018-cicm-isabelle_dof-applications/IsaDofApplications.thy (+6 -9)

@@ -19,17 +19,14 @@ begin
open_monitor*[this::article]
declare[[strict_monitor_checking=false]]

setup \<open> DOF_lib.define_shortcut \<^binding>\<open>isadof\<close> "\\isadof"
#> DOF_lib.define_shortcut \<^binding>\<open>LaTeX\<close> "\\LaTeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>Protege\<close> "Prot{\\'e}g{\\'e}"
#> DOF_lib.define_shortcut \<^binding>\<open>dots\<close> "\\ldots"
#> DOF_lib.define_shortcut \<^binding>\<open>isabelle\<close> "Isabelle/HOL"

\<close>
define_shortcut* isadof \<rightleftharpoons> \<open>\isadof\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
dots \<rightleftharpoons> \<open>\ldots\<close>
isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
Protege \<rightleftharpoons> \<open>Prot{\'e}g{\'e}\<close>

(* slanted text in contrast to italics *)
setup\<open> DOF_lib.define_macro \<^binding>\<open>slanted_text\<close> "\\textsl{" "}" (K(K()))\<close>

define_macro* slanted_text \<rightleftharpoons> \<open>\textsl{\<close> _ \<open>}\<close>

(*>*)



examples/scholarly_paper/2020-iFM-CSP/paper.thy (+47 -50)

@@ -1,27 +1,30 @@
(*<*)
theory "paper"
imports
"Isabelle_DOF.scholarly_paper"
imports "Isabelle_DOF.scholarly_paper"
begin


open_monitor*[this::article]

declare[[strict_monitor_checking = false]]
declare[[ strict_monitor_checking = false]]
declare[[ Definition_default_class = "definition"]]
declare[[ Lemma_default_class = "lemma"]]
declare[[ Theorem_default_class = "theorem"]]

setup \<open> DOF_lib.define_shortcut \<^binding>\<open>csp\<close> "CSP"
#> DOF_lib.define_shortcut \<^binding>\<open>isabelle\<close> "Isabelle/HOL"\<close>
define_shortcut* csp \<rightleftharpoons> \<open>CSP\<close>
holcsp \<rightleftharpoons> \<open>HOL-CSP\<close>
isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>

(*>*)

title*[tit::title]\<open>Philosophers may Dine - Definitively!\<close>
author*[safouan,email="\<open>safouan.taha@lri.fr\<close>",affiliation="\<open>LRI, CentraleSupelec\<close>"]\<open>Safouan Taha\<close>
author*[bu,email= "\<open>wolff@lri.fr\<close>",affiliation = "\<open>LRI, Université Paris-Saclay\<close>"]\<open>Burkhart Wolff\<close>
author*[lina,email="\<open>lina.ye@lri.fr\<close>",affiliation="\<open>LRI, Inria, LSV, CentraleSupelec\<close>"]\<open>Lina Ye\<close>
abstract*[abs, keywordlist="[\<open>Shallow Embedding\<close>,\<open>Process-Algebra\<close>,
\<open>Concurrency\<close>,\<open>Computational Models\<close>]"]
\<open>Concurrency\<close>,\<open>Computational Models\<close>]"]
\<open> The theory of Communicating Sequential Processes going back to Hoare and Roscoe is still today
one of the reference theories for concurrent specification and computing. In 1997, a first
formalization in \<^isabelle> of the denotational semantics of the Failure/Divergence Model of
@@ -60,8 +63,8 @@ systems, such as the T9000 transansputer @{cite "Barret95"}.
The theory of \<^csp> was first described in 1978 in a book by Tony Hoare @{cite "Hoare:1985:CSP:3921"},
but has since evolved substantially @{cite "BrookesHR84" and "brookes-roscoe85" and "roscoe:csp:1998"}.
\<^csp> describes the most common communication and synchronization mechanisms
with one single language primitive: synchronous communication written \<open>_\<lbrakk>_\<rbrakk>_\<close>. \<^csp> semantics is described
by a fully abstract model of behaviour designed to be \<^emph>\<open>compositional\<close>: the denotational
with one single language primitive: synchronous communication written \<open>_\<lbrakk>_\<rbrakk>_\<close>. \<^csp> semantics is
described by a fully abstract model of behaviour designed to be \<^emph>\<open>compositional\<close>: the denotational
semantics of a process \<open>P\<close> encompasses all possible behaviours of this process in the context of all
possible environments \<open>P \<lbrakk>S\<rbrakk> Env\<close> (where \<open>S\<close> is the set of \<open>atomic events\<close> both \<open>P\<close> and \<open>Env\<close> must
synchronize). This design objective has the consequence that two kinds of choice have to
@@ -156,7 +159,7 @@ Let two processes be defined as follows:
\<^enum> \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t = (a \<rightarrow> Stop) \<sqinter> (b \<rightarrow> Stop)\<close>
\<close>

text\<open>\<^noindent> These two processes \<open>P\<^sub>d\<^sub>e\<^sub>t\<close> and \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t\<close> cannot be distinguished by using
text\<open>These two processes \<open>P\<^sub>d\<^sub>e\<^sub>t\<close> and \<open>P\<^sub>n\<^sub>d\<^sub>e\<^sub>t\<close> cannot be distinguished by using
the trace semantics: \<open>\<T>(P\<^sub>d\<^sub>e\<^sub>t) = \<T>(P\<^sub>n\<^sub>d\<^sub>e\<^sub>t) = {[],[a],[b]}\<close>. To resolve this problem, Brookes @{cite "BrookesHR84"}
proposed the failures model, where communication traces were augmented with the
constraint information for further communication that is represented negatively as a refusal set.
@@ -181,7 +184,7 @@ many times. However, using the \<^csp> hiding operator \<open>_\_\<close>, this

\<close>

text\<open>\<^noindent> where \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> will be equivalent to \<open>\<bottom>\<close> in the process cpo ordering.
text\<open>where \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> will be equivalent to \<open>\<bottom>\<close> in the process cpo ordering.
To distinguish divergences from the deadlock process, Brookes and Roscoe
proposed failure/divergence model to incorporate divergence traces @{cite "brookes-roscoe85"}.
A divergence trace is the one leading to a possible divergent behavior.
@@ -245,7 +248,7 @@ Second, in the traditional literature, the semantic domain is implicitly describ
over the three semantic functions \<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close>.
Informally, these are:

\<^item> the initial trace of a process must be empty;
\<^item> the initial trace of a process must be empty;
\<^item> any allowed trace must be \<open>front\<^sub>-tickFree\<close>;
\<^item> traces of a process are \<^emph>\<open>prefix-closed\<close>;
\<^item> a process can refuse all subsets of a refusal set;
@@ -256,8 +259,7 @@ Informally, these are:
\<^item> a trace ending with \<open>\<surd>\<close> belonging to divergence set implies that its
maximum prefix without \<open>\<surd>\<close> is also a divergent trace.


\<^noindent> More formally, a process \<open>P\<close> of the type \<open>\<Sigma> process\<close> should have the following properties:
More formally, a process \<open>P\<close> of the type \<open>\<Sigma> process\<close> should have the following properties:


@{cartouche [display] \<open>([],{}) \<in> \<F> P \<and>
@@ -270,9 +272,8 @@ Informally, these are:
(\<forall> s X. s \<in> \<D> P \<longrightarrow> (s,X) \<in> \<F> P) \<and>
(\<forall> s. s@[\<surd>] \<in> \<D> P \<longrightarrow> s \<in> \<D> P)\<close>}


Our objective is to encapsulate this wishlist into a type constructed as a conservative
theory extension in our theory HOL-\<^csp>.
theory extension in our theory \<^holcsp>.
Therefore third, we define a pre-type for processes \<open>\<Sigma> process\<^sub>0\<close> by \<open> \<P>(\<Sigma>\<^sup>\<surd>\<^sup>* \<times> \<P>(\<Sigma>\<^sup>\<surd>)) \<times> \<P>(\<Sigma>\<^sup>\<surd>)\<close>.
Fourth, we turn our wishlist of "axioms" above into the definition of a predicate \<open>is_process P\<close>
of type \<open>\<Sigma> process\<^sub>0 \<Rightarrow> bool\<close> deciding if its conditions are fulfilled. Since \<open>P\<close> is a pre-process,
@@ -281,7 +282,7 @@ And last not least fifth, we use the following type definition:
\<^item> \<^theory_text>\<open>typedef '\<alpha> process = "{P :: '\<alpha> process\<^sub>0 . is_process P}"\<close>


\<^noindent> Isabelle requires a proof for the existence of a witness for this set,
Isabelle requires a proof for the existence of a witness for this set,
but this can be constructed in a straight-forward manner. Suitable definitions for
\<open>\<T>\<close>, \<open>\<F>\<close> and \<open>\<D>\<close> lifting \<open>fst\<close> and \<open>snd\<close> on the new \<open>'\<alpha> process\<close>-type allows to derive
the above properties for any \<open>P::'\<alpha> process\<close>. \<close>
@@ -298,11 +299,9 @@ This boils down to a proof that an equivalent definition on the pre-process type
maintains \<open>is_process\<close>, \<^ie> this predicate remains invariant on the elements of the semantic domain.
For example, we define \<open>_\<sqinter>_\<close> on the pre-process type as follows:


\<^item> \<^theory_text>\<open>definition "P \<sqinter> Q \<equiv> Abs_process(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)"\<close>


\<^noindent> where \<open>\<F> = fst \<circ> Rep_process\<close> and \<open>\<D> = snd \<circ> Rep_process\<close> and where \<open>Rep_process\<close> and
where \<open>\<F> = fst \<circ> Rep_process\<close> and \<open>\<D> = snd \<circ> Rep_process\<close> and where \<open>Rep_process\<close> and
\<open>Abs_process\<close> are the representation and abstraction morphisms resulting from the
type definition linking \<open>'\<alpha> process\<close> isomorphically to \<open>'\<alpha> process\<^sub>0\<close>. Proving the above properties
for \<open>\<F> (P \<sqinter> Q)\<close> and \<open>\<D> (P \<sqinter> Q)\<close> requires a proof that \<open>(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)\<close>
@@ -360,7 +359,7 @@ We define \<open>P \<sqsubseteq> Q \<equiv> \<psi>\<^sub>\<D> \<and> \<psi>\<^su
\<^enum> \<open>\<psi>\<^sub>\<M> = Mins(\<D> P) \<subseteq> \<T> Q \<close>
\<close>

text\<open>\<^noindent> Note that the third condition \<open>\<psi>\<^sub>\<M>\<close> implies that the set of minimal divergent traces
text\<open>The third condition \<open>\<psi>\<^sub>\<M>\<close> implies that the set of minimal divergent traces
(ones with no proper prefix that is also a divergence) in \<open>P\<close>, denoted by \<open>Mins(\<D> P)\<close>,
should be a subset of the trace set of \<open>Q\<close>.
%One may note that each element in \<open>Mins(\<D> P)\<close> do actually not contain the \<open>\<surd>\<close>,
@@ -397,7 +396,7 @@ The port of HOL-CSP 2 on HOLCF implied that the derivation of the entire continu
had to be completely re-done (3000 loc).

\<^noindent> HOL-CSP provides an important proof principle, the fixed-point induction:
HOL-CSP provides an important proof principle, the fixed-point induction:

@{cartouche [display, indent=5] \<open>cont f \<Longrightarrow> adm P \<Longrightarrow> P \<bottom> \<Longrightarrow> (\<And>X. P X \<Longrightarrow> P(f X)) \<Longrightarrow> P(\<mu>X. f X)\<close>}

@@ -528,34 +527,34 @@ To handle termination better, we added two new processes \<open>CHAOS\<^sub>S\<^
%thus must be without it.
\<close>

text*[X22::"definition"]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
text*[X32::"definition"]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X42::"definition"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close>\<close>
(*<*) (* a test ...*)
text*[X22 ::math_content ]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
text*[X32::"definition", mcc=defn]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X42]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X52::"definition"]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>

text\<open> The \<open>RUN\<close>-process defined @{definition X22} represents the process that accepts all
text\<open> The \<open>RUN\<close>-process defined @{math_content X22} represents the process that accepts all
events, but never stops nor deadlocks. The \<open>CHAOS\<close>-process comes in two variants shown in
@{definition X32} and @{definition X42}: the process that non-deterministically stops or
accepts any offered event, whereas \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> can additionally terminate.\<close>
@{definition X32} and @{definition X42} @{definition X52}: the process that non-deterministically
stops or accepts any offered event, whereas \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> can additionally terminate.\<close>
(*>*)

Definition*[X2]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
Definition*[X3]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X2]\<open>\<open>RUN A \<equiv> \<mu> X. \<box> x \<in> A \<rightarrow> X\<close> \<close>
Definition*[X3]\<open>\<open>CHAOS A \<equiv> \<mu> X. (STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close> \<close>
Definition*[X4]\<open>\<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. (SKIP \<sqinter> STOP \<sqinter> (\<box> x \<in> A \<rightarrow> X))\<close>\<close>
Definition*[X5]\<open>\<open>DF A \<equiv> \<mu> X. (\<sqinter> x \<in> A \<rightarrow> X)\<close> \<close>
Definition*[X6]\<open>\<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. ((\<sqinter> x \<in> A \<rightarrow> X) \<sqinter> SKIP)\<close> \<close>
Definition*[X5]\<open>\<open>DF A \<equiv> \<mu> X. (\<sqinter> x \<in> A \<rightarrow> X)\<close> \<close>
Definition*[X6]\<open>\<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<equiv> \<mu> X. ((\<sqinter> x \<in> A \<rightarrow> X) \<sqinter> SKIP)\<close> \<close>

text\<open> \<^noindent>
In the following, we denote \<open> \<R>\<P> = {DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P, DF, RUN, CHAOS, CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P}\<close>.
text\<open>In the following, we denote \<open> \<R>\<P> = {DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P, DF, RUN, CHAOS, CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P}\<close>.
All five reference processes are divergence-free.
%which was done by using a particular lemma \<open>\<D> (\<mu> x. f x) = \<Inter>\<^sub>i\<^sub>\<in>\<^sub>\<nat> \<D> (f\<^sup>i \<bottom>)\<close>.


@{cartouche [display,indent=8] \<open> D (\<PP> UNIV) = {} where \<PP> \<in> \<R>\<P> and UNIV is the set of all events\<close>}


@{cartouche
[display,indent=8] \<open> D (\<PP> UNIV) = {} where \<PP> \<in> \<R>\<P> and UNIV is the set of all events\<close>
}
Regarding the failure refinement ordering, the set of failures \<open>\<F> P\<close> for any process \<open>P\<close> is
a subset of \<open>\<F> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close>.% and the following lemma was proved:
% This proof is performed by induction, based on the failure projection of \<open>STOP\<close> and that of internal choice.
% This proof is performed by induction, based on the failure projection of \<open>STOP\<close> and that of
% internal choice.


@{cartouche [display, indent=25] \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close>}
@@ -615,8 +614,6 @@ be deadlocked after any non-terminating trace.

Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>"]
\<open> \hfill \break \<open>deadlock_free P \<longleftrightarrow> (\<forall>s\<in>\<T> P. tickFree s \<longrightarrow> (s, {\<surd>}\<union>events_of P) \<notin> \<F> P)\<close> \<close>


Definition*[X11]\<open> \<open>livelock\<^sub>-free P \<equiv> \<D> P = {} \<close> \<close>

text\<open> Recall that all five reference processes are livelock-free.
@@ -632,12 +629,12 @@ Finally, we proved the following theorem that confirms the relationship between
properties:
\<close>
Theorem*[T2, short_name="''DF implies LF''"]
\<open> \hspace{0.5cm} \<open>deadlock_free P \<longrightarrow> livelock_free P\<close> \<close>
\<open> \<open>deadlock_free P \<longrightarrow> livelock_free P\<close> \<close>

text\<open>
This is totally natural, at a first glance, but surprising as the proof of deadlock-freeness only requires
failure refinement \<open>\<sqsubseteq>\<^sub>\<F>\<close> (see @{definition \<open>X10\<close>}) where divergence traces are mixed within the failures set.
Note that the existing tools in the literature normally detect these two phenomena
This is totally natural, at a first glance, but surprising as the proof of deadlock-freeness only
requires failure refinement \<open>\<sqsubseteq>\<^sub>\<F>\<close> (see @{definition \<open>X10\<close>}) where divergence traces are mixed within
the failures set. Note that the existing tools in the literature normally detect these two phenomena
separately, such as FDR for which checking livelock-freeness is very costly.
In our framework, deadlock-freeness of a given system
implies its livelock-freeness. However, if a system is not deadlock-free,
@@ -695,13 +692,13 @@ refinement orderings. We state:

@{theory_text [display,indent=5] \<open>lemma: COPY \<sqsubseteq> SYSTEM\<close>}

\<^noindent> and apply fixed-point induction over \<open>COPY\<close>; this leaves us to the three subgoals:
and apply fixed-point induction over \<open>COPY\<close>; this leaves us to the three subgoals:
\<^enum> \<open>adm (\<lambda>a. a \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN)\<close>
\<^enum> \<open>\<bottom> \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>
\<^enum> @{cartouche [display]\<open>P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN \<Longrightarrow>
left?x \<rightarrow> right!x \<rightarrow> P \<sqsubseteq> (SEND \<lbrakk>SYN\<rbrakk> REC) \ SYN\<close>}

\<^noindent> The first two sub-proofs are automatic simplification proofs; the third requires unfolding
The first two sub-proofs are automatic simplification proofs; the third requires unfolding
\<open>SEND\<close> and \<open>REC\<close> one step and applying the algebraic laws. No denotational
semantics reasoning is necessary here; it is just an induct-simplify proof consisting
of 2 lines proof-script involving the derived algebraic laws of \<^csp>.


examples/technical_report/Isabelle_DOF-Manual/00_Frontmatter.thy (+17 -16)

@@ -16,24 +16,25 @@ theory "00_Frontmatter"
imports "Isabelle_DOF.technical_report"
begin

section\<open>Document Local Setup.\<close>
text\<open>Some internal setup, introducing document specific abbreviations and macros.\<close>

setup \<open>DOF_lib.define_shortcut \<^binding>\<open>dof\<close> "\\dof"\<close>
setup \<open>DOF_lib.define_shortcut \<^binding>\<open>isadof\<close> "\\isadof"\<close>
setup \<open> DOF_lib.define_shortcut \<^binding>\<open>TeXLive\<close>"\\TeXLive"
#> DOF_lib.define_shortcut \<^binding>\<open>BibTeX\<close> "\\BibTeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>LaTeX\<close> "\\LaTeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>TeX\<close> "\\TeX{}"
#> DOF_lib.define_shortcut \<^binding>\<open>pdf\<close> "PDF"
#> DOF_lib.define_shortcut \<^binding>\<open>pdftex\<close> "\\pdftex{}"
\<close>

text\<open>Note that these setups assume that the associated \<^LaTeX> macros are defined, \<^eg>,
in the document prelude. \<close>
section\<open>Local Document Setup.\<close>
text\<open>... introducing document specific abbreviations and macros.\<close>

define_shortcut* dof \<rightleftharpoons> \<open>\dof\<close>
isadof \<rightleftharpoons> \<open>\isadof\<close>

define_shortcut* TeXLive \<rightleftharpoons> \<open>\TeXLive\<close>
BibTeX \<rightleftharpoons> \<open>\BibTeX{}\<close>
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
TeX \<rightleftharpoons> \<open>\TeX{}\<close>
pdf \<rightleftharpoons> \<open>PDF\<close>
pdftex \<rightleftharpoons> \<open>\pdftex{}\<close>

text\<open>Note that these setups assume that the associated \<^LaTeX> macros
are defined, \<^eg>, in the document prelude. \<close>

setup\<open> DOF_lib.define_macro \<^binding>\<open>index\<close> "\\index{" "}" (K(K())) (*no checking, no reporting*)
#> DOF_lib.define_macro \<^binding>\<open>bindex\<close> "\\bindex{" "}"(K(K()))\<close>
define_macro* index \<rightleftharpoons> \<open>\index{\<close> _ \<open>}\<close>
define_macro* bindex \<rightleftharpoons> \<open>\bindex{\<close> _ \<open>}\<close>


ML\<open>


examples/technical_report/Isabelle_DOF-Manual/03_GuidedTour.thy (+2 -0)

@@ -16,6 +16,7 @@ theory
"03_GuidedTour"
imports
"02_Background"
"Isabelle_DOF.technical_report"
"Isabelle_DOF.CENELEC_50128"
begin
(*>*)
@@ -424,6 +425,7 @@ doc_class "theorem" = math_content +
mcc :: "math_content_class" <= "thm" ...
\<close>}\<close>


text\<open>The class \<^verbatim>\<open>technical\<close> regroups a number of text-elements that contain typical
"technical content" in mathematical or engineering papers: code, definitions, theorems,
lemmas, examples. From this class, the stricter class of @{typ \<open>math_content\<close>} is derived,


examples/technical_report/Isabelle_DOF-Manual/04_RefMan.thy (+617 -244)

@@ -16,144 +16,84 @@ theory
"04_RefMan"
imports
"03_GuidedTour"
"Isabelle_DOF.Isa_COL"
"Isabelle_DOF.technical_report"
begin

declare_reference*[infrastructure::technical]

(*>*)

chapter*[isadof_ontologies::technical]\<open>Developing Ontologies\<close>
chapter*[isadof_ontologies::technical]\<open>Ontologies and their Development\<close>

text\<open>
In this chapter, we explain the concepts for modeling new ontologies, developing a document
representation for them, as well as developing new document templates.
\<close>
In this chapter, we explain the concepts of \<^isadof> in a more systematic way, and give
guidelines for modeling new ontologies, present underlying concepts for a mapping to a
representation, and give hints for the development of new document templates.

section*[infrastructure::technical]\<open>Overview and Technical Infrastructure\<close>
text\<open>
\<^isadof> is embedded in the underlying generic document model of Isabelle as described in
\<^introduction>\<open>dof\<close>. Recall that the document language can be extended dynamically, \<^ie>, new
\<open>user-defined\<close> commands can be introduced at run-time. This is similar to the definition of new functions
in an interpreter. \<^isadof> as a system plugin is is a number of new command definitions in
in an interpreter. \<^isadof> as a system plugin provides a number of new command definitions in
Isabelle's document model.

\<^isadof> basically consists of four components:
\<^item> an own \<^emph>\<open>family of text-elements\<close> such as \<^boxed_theory_text>\<open>title*\<close>, \<^boxed_theory_text>\<open>chapter*\<close>
\<^boxed_theory_text>\<open>text*\<close>, etc., which can be annotated with meta-information defined in the
underlying ontology definition and allow to build a \<^emph>\<open>core\<close> document,
\<^item> the \<^emph>\<open>ontology definition language\<close> (called ODL) which allow for the definitions
of document-classes and necessary auxiliary datatypes,
\<^isadof> basically consists of five components:
\<^item> the \<^emph>\<open>DOF-core\<close> providing the \<^emph>\<open>ontology definition language\<close> (called ODL)
  which allows for the definition of document-classes and necessary auxiliary datatypes,
\<^item> the \<^emph>\<open>DOF-core\<close> also provides its own \<^emph>\<open>family of commands\<close> such as
  \<^boxed_theory_text>\<open>text*\<close>, \<^boxed_theory_text>\<open>declare_reference*\<close>, \<^etc>;
  they allow for the annotation of text-elements with meta-information defined in ODL,
\<^item> the \<^isadof> library of ontologies providing ontological concepts as well
as supporting infrastructure,
\<^item> an infrastructure for ontology-specific \<^emph>\<open>layout definitions\<close>, exploiting this meta-information,
and
\<^item> an infrastructure for generic \<^emph>\<open>layout definitions\<close> for documents following, \<^eg>, the format
guidelines of publishers or standardization bodies.
\<close>

text\<open>
The list of fully supported (\<^ie>, supporting both interactive ontological modeling and
document generation) ontologies and the list of supported document templates can be
obtained by calling \inlinebash|isabelle mkroot_DOF -h| (see \<^technical>\<open>first_project\<close>).
Note that the postfix \inlinebash|-UNSUPPORTED| denotes experimental ontologies or templates
for which further manual setup steps might be required or that are not fully tested. Also note
that the \<^LaTeX>-class files required by the templates need to be already installed on your
system. This is mostly a problem for publisher specific templates (\<^eg>, Springer's
\<^path>\<open>llncs.cls\<close>), which cannot be re-distributed due to copyright restrictions.
\<close>

subsection\<open>Ontologies\<close>
text\<open>
The document core \<^emph>\<open>may\<close>, but \<^emph>\<open>need\<close> not, use Isabelle definitions or proofs for checking the
formal content---this manual is actually an example of a document not containing any proof.
Consequently, the document editing and checking facility provided by \<^isadof> addresses the needs
of common users for an advanced text-editing environment; neither modeling nor proof knowledge is
inherently required.

We expect authors of ontologies to have experience in the use of \<^isadof>, basic modeling (and,
potentially, some basic SML programming) experience, basic \<^LaTeX> knowledge, and, last but not
least, domain knowledge of the ontology to be modeled. Users with experience in UML-like
meta-modeling will feel familiar with most concepts; however, we expect no need for insight in
the Isabelle proof language, for example, or other more advanced concepts.

Technically, ontologies\<^index>\<open>ontology!directory structure\<close> are stored in a directory
\inlinebash|src/ontologies| and consist of an Isabelle theory file and a \<^LaTeX>-style file:
Similarly to Isabelle, which is based on a core logic \<^theory>\<open>Pure\<close> and then extended by libraries
to major systems like \<^verbatim>\<open>HOL\<close> or \<^verbatim>\<open>FOL\<close>, \<^isadof> has a generic core infrastructure \<^dof> and then
presents itself to users via major library extensions, which add domain-specific
system-extensions. Consequently, ontologies in \<^isadof> are not just a sequence of descriptions in
\<^isadof>'s Ontology Definition Language (ODL). Rather, they are themselves presented as integrated
sources that provide textual descriptions, abbreviations, macro-support and even ML-code.
Conceptually, the library of \<^isadof> is currently organized as follows
\<^footnote>\<open>Note that the \<^emph>\<open>technical\<close> organisation is slightly different and shown in
@{technical (unchecked) \<open>infrastructure\<close>}.\<close>:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 ontologies\DTcomment{Ontologies}.
.4 ontologies.thy\DTcomment{Ontology Registration}.
.4 CENELEC\_50128\DTcomment{CENELEC\_50128}.
.5 CENELEC\_50128.thy.
.5 DOF-CENELEC\_50128.sty.
.4 scholarly\_paper\DTcomment{scholarly\_paper}.
.5 scholarly\_paper.thy.
.5 DOF-scholarly\_paper.sty.
.1 COL\DTcomment{The Common Ontology Library}.
.2 scholarly\_paper\DTcomment{Scientific Papers}.
.3 technical\_report\DTcomment{Extended Papers}.
.4 CENELEC\_50128\DTcomment{Papers according to CENELEC\_50128}.
.4 CC\_v3\_1\_R5\DTcomment{Papers according to Common Criteria}.
.4 \ldots.
}
\end{minipage}
\end{center}
\<close>
text\<open>
Developing a new ontology ``\inlinebash|foo|'' requires, from a technical perspective, the
following steps:
\<^item> create a new sub-directory \inlinebash|foo| in the directory \inlinebash|src/ontologies|
\<^item> definition of the ontological concepts, using \<^isadof>'s Ontology Definition Language (ODL), in
a new theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close>.
\<^item> definition of the document representation for the ontological concepts in a \LaTeX-style
file \<^path>\<open>src/ontologies/foo/DOF-foo.sty\<close>
\<^item> registration (as import) of the new ontology in the file
\<^path>\<open>src/ontologies/ontologies.thy\<close>.
\<^item> activation of the new document setup by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>
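
text\<open>A minimal sketch of what such a \<^path>\<open>src/ontologies/foo/foo.thy\<close> could contain
(the class and attribute names are purely illustrative; the ODL syntax follows the
\<^boxed_theory_text>\<open>doc_class\<close> examples shown later in this chapter):

@{boxed_theory_text [display]\<open>
theory foo
  imports "Isabelle_DOF.Isa_COL"
begin

(* an illustrative concept derived from the COL root class text_element *)
doc_class foo_item = text_element +
   status :: "string option" <= "None"

end
\<close>}
\<close>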

subsection\<open>Document Templates\<close>
text\<open>
Document-templates\<^index>\<open>document template\<close> define the overall layout (page size, margins, fonts,
etc.) of the generated documents and are the main technical means for implementing layout
requirements that are, \<^eg>, required by publishers or standardization bodies. Document-templates
are stored in a directory
\<^path>\<open>src/document-templates\<close>:\<^index>\<open>document template!directory structure\<close>
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 document-templates\DTcomment{Document templates}.
.4 root-lncs.tex.
.4 root-scrartcl.tex.
.4 root-scrreprt-modern.tex.
.4 root-scrreprt.tex.
}
\end{minipage}
\end{center}
\<close>

text\<open>
Developing a new document template ``\inlinebash|bar|'' requires the following steps:
\<^item> develop a new \<^LaTeX>-template \inlinebash|src/document-templates/root-bar.tex|
\<^item> activation of the new document template by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>


text\<open>
As the document generation of \<^isadof> is based
on \<^LaTeX>, the \<^isadof> document templates can (and should) make use of any \<^LaTeX>-classes provided
by publishers or standardization bodies.
These libraries not only provide ontological concepts, but also syntactic sugar in Isabelle's
command language Isar that is of major importance for users (and may be felt as \<^isadof> key
features by many authors). In reality,
they are derived concepts from more generic ones; for example, the commands
\<^boxed_theory_text>\<open>title*\<close>, \<^boxed_theory_text>\<open>section*\<close>, \<^boxed_theory_text>\<open>subsection*\<close>, \<^etc>,
are in reality a kind of macros for \<^boxed_theory_text>\<open>text*[<label>::title]...\<close>,
\<^boxed_theory_text>\<open>text*[<label>::section]...\<close>, respectively.
These example commands are defined in the COL.

As mentioned earlier, our ontology framework is currently particularly geared towards
\<^emph>\<open>document\<close> editing, structuring and presentation (future applications might be advanced
"knowledge-based" search procedures as well as tool interaction). For this reason, ontologies
are coupled with \<^emph>\<open>layout definitions\<close> allowing an automatic mapping of an integrated
source into \<^LaTeX> and finally \<^pdf>. The mapping of an ontology to a specific representation
in \<^LaTeX> is steered via associated \<^LaTeX> stylefiles which are included during Isabelle's
document generation process. This mapping is potentially a one-to-many mapping;
this implies a certain technical organisation and some resulting restrictions
described in @{technical (unchecked) \<open>infrastructure\<close>} in more detail.
\<close>
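
text\<open>To illustrate the correspondence mentioned above, a minimal sketch (both lines create an
instance of the class \<^boxed_theory_text>\<open>title\<close>; the starred command merely adds the specific
layout; the title is taken from the example paper \<^verbatim>\<open>2020-iFM-CSP\<close>):

@{boxed_theory_text [display]\<open>
title*[tit::title]\<open>Philosophers may Dine - Definitively!\<close>
(* is essentially an abbreviation for: *)
text*[tit::title]\<open>Philosophers may Dine - Definitively!\<close>
\<close>}
\<close>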

section\<open>The Ontology Definition Language (ODL)\<close>

text\<open>
ODL shares some similarities with meta-modeling languages such as UML class
models: It builds upon concepts like class, inheritance, class-instances, attributes, references
@@ -263,7 +203,7 @@ text\<open>
text\<open>
Advanced ontologies can, \<^eg>, use recursive function definitions with
pattern-matching~@{cite "kraus:defining:2020"}, extensible record
pecifications~@{cite "wenzel:isabelle-isar:2020"}, and abstract type declarations.
specifications~@{cite "wenzel:isabelle-isar:2020"}, and abstract type declarations.
\<close>

text\<open>Note that \<^isadof> works internally with fully qualified names in order to avoid confusions
@@ -296,7 +236,6 @@ A document class\<^bindex>\<open>document class\<close> can be defined using the
\<^rail>\<open> 'inv' (name '::')? '"' term '"' \<close>
\<^item> \<open>accepts_clause\<close>:\<^index>\<open>accepts\_clause@\<open>accepts_clause\<close>\<close>
\<^rail>\<open> 'accepts' '"' regexpr '"'\<close>
\<^clearpage>
\<^item> \<open>rejects_clause\<close>:\<^index>\<open>rejects\_clause@\<open>rejects_clause\<close>\<close>
\<^rail>\<open> 'rejects' (class_id * ',') \<close>
\<^item> \<open>default_clause\<close>:\<^index>\<open>default\_clause@\<open>default_clause\<close>\<close>
@@ -356,43 +295,161 @@ text\<open>
special characters in definitions that need to make use of entries in an aux-file.
\<close>

subsection\<open>Common Ontology Library (COL)\<close>
section\<open>Fundamental Commands of the \<^isadof> Core\<close>
text\<open>Besides the core-commands to define an ontology as presented in the previous section,
the \<^isadof> core provides a number of mechanisms to \<^emph>\<open>use\<close> the resulting data to annotate
text-elements and, in some cases, terms.
\<close>

text\<open>\<^isadof> uses the concept of implicit abstract classes (or: \<^emph>\<open>shadow classes\<close>).
These refer to the set of possible \<^boxed_theory_text>\<open>doc_class\<close> declarations that possess a number
of attributes with their types in common. Shadow classes represent an implicit requirement
(or pre-condition) on a given class to possess these attributes in order to work properly
for certain \<^isadof> commands.
subsection\<open>Syntax\<close>
text\<open>In the following, we formally introduce the syntax of the core commands as
supported on the Isabelle/Isar level. Note that some more advanced functionality of the core
is currently only available via the SML APIs of the kernel.

Shadow classes will find concrete instances in COL, but \<^isadof> text elements do not \<^emph>\<open>depend\<close>
on our COL definitions: ontology developers are free to build their own class instances for these
shadow classes, with their own attributes and, last but not least, their own definitions of
invariants independent from ours.
\<^item> \<open>meta_args\<close> :
\<^rail>\<open>obj_id ('::' class_id) ((',' attribute '=' term) *) \<close>
\<^item> \<open>upd_meta_args\<close> :
\<^rail>\<open> (obj_id ('::' class_id) ((',' attribute ('=' | '+=') term) * ))\<close>
\<^item> \<open>annotated_text_element\<close> :
\<^rail>\<open>
( @@{command "text*"}'[' meta_args ']' '\<open>' text '\<close>' |
( @@{command "open_monitor*"}
| @@{command "close_monitor*"}
| @@{command "declare_reference*"}
) '[' meta_args ']'
)
| change_status_command
| inspection_command
| macro_command
\<close>
\<^item> \<^isadof> \<open>change_status_command\<close> :
\<^rail>\<open> (@@{command "update_instance*"} '[' upd_meta_args ']')
| (@@{command "declare_reference*"} (obj_id ('::' class_id)))\<close>
\<^item> \<^isadof> \<open>inspection_command\<close> :
\<^rail>\<open> @@{command "print_doc_classes"}
| @@{command "print_doc_items"}
| @@{command "check_doc_global"}\<close>
\<^item> \<^isadof> \<open>macro_command\<close> :
\<^rail>\<open> @@{command "define_shortcut*"} name ('\<rightleftharpoons>' | '==') '\<open>' string '\<close>'
| @@{command "define_macro*"} name ('\<rightleftharpoons>' | '==')
\<newline> '\<open>' string '\<close>' '_' '\<open>' string '\<close>' \<close>
\<close>
text\<open>Recall that with the exception of \<^theory_text>\<open>text* \<dots> \<close>, all \<^isadof> commands are mapped to visible
layout (such as \<^LaTeX>); these commands have to be wrapped into
\<^verbatim>\<open>(*<*) ... (*>*)\<close> brackets if this is undesired. \<close>

subsection\<open>Ontologic Text-Elements and their Management\<close>
text\<open> \<^theory_text>\<open>text*[oid::cid, ...] \<open>\<open>\<close> \<dots> text \<dots> \<open>\<close>\<close> \<close> is the core-command of \<^isadof>: it permits creating
an object of meta-data belonging to the class \<^theory_text>\<open>cid\<close>. This is viewed as the \<^emph>\<open>definition\<close> of
an instance of a document class. This instance object is attached to the text-element
and makes it thus "trackable" for \<^isadof>, \<^ie>, it can be referenced via the \<^theory_text>\<open>oid\<close>, its attributes
can be set by defaults in the class-definitions, or set at creation time, or modified at any
point after creation via \<^theory_text>\<open>update_instance*[oid, ...]\<close>. The \<^theory_text>\<open>class_id\<close> is syntactically optional;
if ommitted, an object belongs to an anonymous superclass of all classes.
The \<^theory_text>\<open>class_id\<close> is used to generate a \<^emph>\<open>class-type\<close> in HOL; note that this may impose lexical
restrictions as well as to name-conflicts in the surrounding logical context.
In many cases, it is possible to use the class-type to denote the \<^theory_text>\<open>class_id\<close>; this also
holds for type-synonyms on class-types.

References to text-elements can occur textually before creation; in these cases, they must be
declared via \<^theory_text>\<open>declare_reference*[...]\<close> in order to comply with Isabelle's fundamental
"declaration-before-use" linear-visibility evaluation principle. The forward-declared class-type
must be identical with the defined class-type.

For a declared class \<^theory_text>\<open>cid\<close>, there exists a text antiquotation of the form \<^theory_text>\<open> @{cid \<open>oid\<close>} \<close>.
The precise presentation is decided in the \<^emph>\<open>layout definitions\<close>, for example by suitable
\<^LaTeX>-template code. Declared but not yet defined instances must be referenced with a particular
pragma in order to enforce a relaxed checking \<^theory_text>\<open> @{cid (unchecked) \<open>oid\<close>} \<close>.

% there should also exist a *term* antiquotation ...
\<close>
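
text\<open>A small sketch combining these commands (the class \<^boxed_theory_text>\<open>example\<close> and its
attribute \<^boxed_theory_text>\<open>level\<close> stem from the ontologies used in this manual; the object
name \<^boxed_theory_text>\<open>expl1\<close> is purely illustrative):

@{boxed_theory_text [display]\<open>
(* forward declaration: the element may be referenced before it is defined *)
declare_reference*[expl1::example]

text\<open>As we will see in @{example (unchecked) \<open>expl1\<close>} ...\<close>

text*[expl1::example]\<open>Some tracked example text.\<close>

(* modifying an attribute after creation *)
update_instance*[expl1::example, level = "Some 2"]

text\<open>... which can now be referenced via @{example \<open>expl1\<close>} without the pragma.\<close>
\<close>}
\<close>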

In particular, these shadow classes are used at present in \<^isadof>:
@{boxed_theory_text [display]\<open>
DOCUMENT_ALIKES =
level :: "int option" <= "None"
(*<*)
declare_reference*["sec:advanced"::technical]
(*>*)

ASSERTION_ALIKES =
properties :: "term list"
FORMAL_STATEMENT_ALIKE =
properties :: "thm list"
\<close>}
subsection\<open>Status and Query Commands\<close>
text\<open>\<^isadof> provides a number of inspection commands.
\<^item> \<^theory_text>\<open>print_doc_classes\<close> allows viewing the status of the internal
  class-table resulting from ODL definitions,
\<^item> \<^ML>\<open>DOF_core.print_doc_class_tree\<close> allows for presenting (fragments of)
  class-inheritance trees (currently only available at the ML level),
\<^item> \<^theory_text>\<open>print_doc_items\<close> allows viewing the status of the internal
  object-table of text-elements that were tracked, and
\<^item> \<^theory_text>\<open>check_doc_global\<close> checks if all declared object references have been
  defined, all monitors are in a final state, and final invariant checks
  on all objects are satisfied (cf. @{technical (unchecked) \<open>sec:advanced\<close>}).
\<close>
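
text\<open>These commands take no arguments; a typical (sketched) use is to place them at the end of
the integrated source as a global sanity check:

@{boxed_theory_text [display]\<open>
print_doc_classes
print_doc_items

(* global check over all tracked items and monitors *)
check_doc_global
\<close>}
\<close>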

These shadow-classes correspond to semantic macros
\<^ML>\<open>Onto_Macros.enriched_document_cmd_exp\<close>,
\<^ML>\<open>Onto_Macros.assertion_cmd'\<close>, and
\<^ML>\<open>Onto_Macros.enriched_formal_statement_command\<close>.\<close>
subsection\<open>Macros\<close>
text\<open>There is a mechanism to define document-local short-cuts and macros which
are PIDE-supported but lead to an expansion in the integrated source; this feature
can be used to define
\<^item> \<^theory_text>\<open>shortcuts\<close>, \<^ie>, short names that are expanded to, for example,
  \<^LaTeX>-code,
\<^item> \<^theory_text>\<open>macro\<close>s (= parameterized short-cuts), which allow for
  passing an argument to the expansion mechanism.
\<close>
text\<open>Note that the argument can be checked by a user-supplied SML function with respect to
syntactic as well as semantic constraints; however, the latter feature is currently only accessible
at the SML level and not directly in the Isar language. We would like to stress that this
feature is basically an abstract interface to existing Isabelle functionality in the document
generation.
\<close>
subsubsection\<open>Examples\<close>
text\<open>
\<^item> common short-cut hiding \<^LaTeX> code in the integrated source:
@{theory_text [display] \<open>
define_shortcut* eg \<rightleftharpoons> \<open>\eg\<close> (* Latin: „exempli gratia“ meaning „for example“. *)
clearpage \<rightleftharpoons> \<open>\clearpage{}\<close>
\<close>}
\<^item> non-checking macro:
@{theory_text [display] \<open>
define_macro* index \<rightleftharpoons> \<open>\index{\<close> _ \<open>}\<close>
\<close>}
\<^item> checking macro:
@{theory_text [display] \<open>
setup\<open> DOF_lib.define_macro \<^binding>\<open>vs\<close> "\\vspace{" "}" (check_latex_measure) \<close>
\<close>}
where \<^ML>\<open>check_latex_measure\<close> is a hand-programmed function that checks
the input for syntactical and static semantic constraints.
\<close>
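
text\<open>Once defined, shortcuts and macros are used inside text elements as control-symbol
antiquotations, as this manual does itself. A sketch, assuming the definitions from the
examples above:

@{boxed_theory_text [display]\<open>
text\<open>A shortcut in running text, \<^eg>, or a macro application such as
     \<^index>\<open>ontology\<close>, which expands to \index{ontology} in the generated \<^LaTeX>.\<close>
\<close>}
\<close>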


text\<open> \<^isadof> provides a Common Ontology Library (COL)\<^index>\<open>Common Ontology Library@see COL\<close>
\<^bindex>\<open>COL\<close> that introduces ontology concepts that are either sample instances for shadow
classes as we use them in our own document generation processes or, in some cases, are
so generic that they we expect them to be useful for all types of documents (figures, for example).
\<close>
section\<open>The Standard Ontology Libraries\<close>
text\<open> We will describe the backbone of the Standard Library with the
already mentioned hierarchy \<^verbatim>\<open>COL\<close> (the common ontology library),
\<^verbatim>\<open>scholarly_paper\<close> (for MINT-oriented scientific papers),
\<^verbatim>\<open>technical_report\<close> (for MINT-oriented technical reports), and
an example of a domain-specific ontology
\<^verbatim>\<open>CENELEC_50128\<close>.\<close>

subsection\<open>Common Ontology Library (COL)\<close>
(*<*)
ML\<open>writeln (DOF_core.print_doc_class_tree @{context} (fn (n,l) => String.isPrefix "Isa_COL" l) I)\<close>
(*>*)
text\<open>
\<^isadof> provides a Common Ontology Library (COL)\<^index>\<open>Common Ontology Library@see COL\<close>
\<^bindex>\<open>COL\<close> \<^footnote>\<open>contained in \<^theory>\<open>Isabelle_DOF.Isa_COL\<close>\<close>
that introduces several ontology root concepts such as common text-elements and
figures. The overall class-tree it provides looks as follows:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 Isa\_COL.text\_element.
.2 Isa\_COL.chapter.
.2 Isa\_COL.section.
.2 Isa\_COL.subsection.
.2 Isa\_COL.subsubsection.
.1 Isa\_COL.figure.
.2 Isa\_COL.side\_by\_side\_figure.
.1 Isa\_COL.figure\_group.
.1 \ldots.
}
\end{minipage}
\end{center}\<close>

text\<open>
In particular it defines the super-class \<^boxed_theory_text>\<open>text_element\<close>: the root of all
@@ -405,7 +462,8 @@ doc_class text_element =
variants :: "String.literal set" <= "{STR ''outline'', STR ''document''}"
\<close>}

Here, \<^boxed_theory_text>\<open>level\<close> defines the section-level (\<^eg>, using a \<^LaTeX>-inspired hierarchy:
As mentioned in @{technical \<open>sss\<close>} (without explaining the origin of \<^typ>\<open>text_element\<close>),
\<^boxed_theory_text>\<open>level\<close> defines the section-level (\<^eg>, using a \<^LaTeX>-inspired hierarchy:
from \<^boxed_theory_text>\<open>Some -1\<close> (corresponding to \inlineltx|\part|) to
\<^boxed_theory_text>\<open>Some 0\<close> (corresponding to \inlineltx|\chapter|, respectively, \<^boxed_theory_text>\<open>chapter*\<close>)
to \<^boxed_theory_text>\<open>Some 3\<close> (corresponding to \inlineltx|\subsubsection|, respectively,
@@ -413,18 +471,309 @@ to \<^boxed_theory_text>\<open>Some 3\<close> (corresponding to \inlineltx|\subs
any sequence of technical-elements must be introduced by a text-element with a higher level
(this would require that technical text sections are introduced by a section element).

Similarly, we provide "minimal" instances of the \<^boxed_theory_text>\<open>ASSERTION_ALIKES\<close>
and \<^boxed_theory_text>\<open>FORMAL_STATEMENT_ALIKE\<close> shadow classes:
The attribute \<^term>\<open>referentiable\<close> captures whether a text-element can be a target
for a reference, which is the case for sections or subsections, for example, but not for arbitrary
elements such as paragraphs (this mirrors restrictions of the target \<^LaTeX> representation).
The attribute \<^term>\<open>variants\<close> refers to an Isabelle-configuration attribute that permits
steering the different versions of a \<^LaTeX>-presentation of the integrated source.


For further information on root classes such as \<^typ>\<open>figure\<close>, please consult the ontology
\<^theory>\<open>Isabelle_DOF.Isa_COL\<close> directly.

COL finally provides macros that extend the command-language of the DOF-core by the following
abbreviations:

\<^item> \<open>derived_text_element\<close> :
\<^rail>\<open>
( ( @@{command "chapter*"}
| @@{command "section*"} | @@{command "subsection*"} | @@{command "subsubsection*"}
| @@{command "paragraph*"} | @@{command "subparagraph*"}
| @@{command "figure*"} | @@{command "side_by_side_figure*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
\<close>
\<close>
text\<open> Note that the command syntax follows the implicit convention of adding a "*" to
the command name in order to distinguish these commands from the standard Isabelle text-commands,
which are not "ontology-aware" but function similarly otherwise.\<close>
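
text\<open>A sketch of their use, mirroring how this manual's own sources employ them (the class
\<^boxed_theory_text>\<open>technical\<close> stems from \<^verbatim>\<open>scholarly_paper\<close>; any suitable
\<^boxed_theory_text>\<open>text_element\<close> subclass works):

@{boxed_theory_text [display]\<open>
chapter*[intro_chap::technical]\<open>Introduction\<close>
section*[bg::technical]\<open>Background\<close>
\<close>}
\<close>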

subsection*["text-elements"::technical]\<open>The Ontology \<^theory>\<open>Isabelle_DOF.scholarly_paper\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => String.isPrefix "scholarly_paper" l
orelse String.isPrefix "Isa_COL" l)
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>scholarly_paper\<close> ontology is oriented towards the classical domains in science:
\<^enum> mathematics
\<^enum> informatics
\<^enum> natural sciences
\<^enum> technology and/or engineering

It extends \<^verbatim>\<open>COL\<close> by the following concepts:
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 scholarly\_paper.title.
.1 scholarly\_paper.subtitle.
.1 scholarly\_paper.author\DTcomment{An Author Entity Declaration}.
.1 scholarly\_paper.abstract.
.1 Isa\_COL.text\_element.
.2 scholarly\_paper.text\_section\DTcomment{Major Paper Text-Elements}.
.3 scholarly\_paper.introduction\DTcomment{...}.
.3 scholarly\_paper.conclusion\DTcomment{...}.
.4 scholarly\_paper.related\_work\DTcomment{...}.
.3 scholarly\_paper.bibliography\DTcomment{...}.
.3 scholarly\_paper.annex\DTcomment{...}.
.3 scholarly\_paper.example\DTcomment{Example in General Sense}.
.3 scholarly\_paper.technical\DTcomment{Root for Technical Content}.
.4 scholarly\_paper.math\_content\DTcomment{...}.
.5 scholarly\_paper.definition\DTcomment{Freeform}.
.5 scholarly\_paper.lemma\DTcomment{Freeform}.
.5 scholarly\_paper.theorem\DTcomment{Freeform}.
.5 scholarly\_paper.corollary\DTcomment{Freeform}.
.5 scholarly\_paper.math\_example\DTcomment{Freeform}.
.5 scholarly\_paper.math\_semiformal\DTcomment{Freeform}.
.5 scholarly\_paper.math\_formal\DTcomment{Formal(=Checked) Content}.
.6 scholarly\_paper.assertion\DTcomment{Assertions}.
.4 scholarly\_paper.tech\_example\DTcomment{...}.
.4 scholarly\_paper.math\_motivation\DTcomment{...}.
.4 scholarly\_paper.math\_explanation\DTcomment{...}.
.4 scholarly\_paper.engineering\_content\DTcomment{...}.
.5 scholarly\_paper.data.
.5 scholarly\_paper.evaluation.
.5 scholarly\_paper.experiment.
.4 ...
.1 ...
.1 scholarly\_paper.article\DTcomment{The Paper Monitor}.
.1 \ldots.
}
\end{minipage}
\end{center}

TODO: There are some slight problems in the hierarchy ...

\<close>

text\<open>A pivotal abstract class in the hierarchy is:
@{boxed_theory_text [display]
\<open>
doc_class text_section = text_element +
main_author :: "author option" <= None
fixme_list :: "string list" <= "[]"
level :: "int option" <= "None"
\<close>}

Besides attributes serving more practical considerations, like a fixme-list that can be modified
during the editing process and is visible in the integrated source but usually ignored in the
\<^LaTeX> output, this class also introduces the possibility to assign an "ownership" or "responsibility"
of a text-element to a specific author. Note that this is possible since \<^isadof> also assigns to each
document class a class-type which is declared in the HOL environment.\<close>

(*<*)
declare_reference*["text-elements-expls"::example]
(*>*)
text*[s23::example, main_author = "Some(@{docitem \<open>bu\<close>}::author)"]\<open>
Recall that concrete authors can be denoted by term-antiquotations generated by \<^isadof>; for example,
in a text fragment like
@{boxed_theory_text [display]
\<open>text*[\<dots>::example, main_author = "Some(@{docitem ''bu''}::author)"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}
or
@{boxed_theory_text [display]
\<open>text*[\<dots>::example, main_author = "Some(@{docitem \<open>bu\<close>}::author)"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}

where \<^boxed_theory_text>\<open>"''bu''"\<close> is a string presentation of the reference to the author
text element (see below in @{docitem (unchecked) \<open>text-elements-expls\<close>}).
\<close>

text\<open>Some of these concepts are supported by command-abbreviations that extend
the \<^isadof> language:

\<^item> \<open>derived_text_elements \<close> :
\<^rail>\<open>
( ( @@{command "author*"}
| @@{command "abstract*"}
| @@{command "Definition*"} | @@{command "Lemma*"} | @@{command "Theorem*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
| @@{command "assert*"} '[' meta_args ']' '\<open>' term '\<close>'
\<close>
\<close>

text\<open>Usually, a command macro for a text element assigns the default class corresponding to that
element. For pragmatic reasons, \<^theory_text>\<open>Definition*\<close>, \<^theory_text>\<open>Lemma*\<close> and \<^theory_text>\<open>Theorem*\<close> are an exception
to this rule and are set up such that the default class is the super-class @{typ \<open>math_content\<close>}
(rather than the class @{typ \<open>definition\<close>}).
This way, it is possible to use these macros for several different sorts of the very generic
concept "definition", which can be used as a freeform mathematical definition but also for a
freeform terminological definition as used in certification standards. Moreover, new subclasses
of @{typ \<open>math_content\<close>} might be introduced in a derived ontology with an own specific layout
definition.
\<close>

text\<open>While this library is intended to give a lot of space to freeform text elements in
order to counterbalance Isabelle's standard view, it should not be forgotten that the real strength
of Isabelle is its ability to handle both, and to establish links between both worlds.
Therefore, the formal assertion command has been integrated to capture some forms of formal content.\<close>


subsubsection*["text-elements-expls"::example]\<open>Examples\<close>

text\<open>
While the default user interface for class definitions via the
\<^boxed_theory_text>\<open>text*\<open> ... \<close>\<close>-command allows access to all features of the document
class, \<^isadof> provides short-hands for certain widely-used concepts such as
\<^boxed_theory_text>\<open>title*\<open> ... \<close>\<close> or \<^boxed_theory_text>\<open>section*\<open> ... \<close>\<close>, \<^eg>:

@{boxed_theory_text [display]\<open>
doc_class assertions =
properties :: "term list"
doc_class "thms" =
properties :: "thm list"
title*[title::title]\<open>Isabelle/DOF\<close>
subtitle*[subtitle::subtitle]\<open>User and Implementation Manual\<close>
author*[adb::author, email="\<open>a.brucker@exeter.ac.uk\<close>",
orcid="\<open>0000-0002-6355-1200\<close>", http_site="\<open>https://brucker.ch/\<close>",
affiliation="\<open>University of Exeter, Exeter, UK\<close>"] \<open>Achim D. Brucker\<close>
author*[bu::author, email = "\<open>wolff@lri.fr\<close>",
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
\<close>}

\<close>

text\<open>Assertions allow for logical statements to be checked in the global context.
This is particularly useful to explore formal definitions with respect to their border cases. \<close>

assert*[ass1::assertion, short_name = "\<open>This is an assertion\<close>"] \<open>last [3] < (4::int)\<close>

text\<open>We want to check the consequences of this definition and can add the following statements:
@{boxed_theory_text [display]\<open>
text*[claim::assertion]\<open>For non-empty lists, our definition yields indeed
the last element of a list.\<close>
assert*[claim1::assertion] "last[4::int] = 4"
assert*[claim2::assertion] "last[1,2,3,4::int] = 4"
\<close>}
\<close>


text\<open>
As mentioned before, the command macros of \<^theory_text>\<open>Definition*\<close>, \<^theory_text>\<open>Lemma*\<close> and \<^theory_text>\<open>Theorem*\<close>
set the default class to the super-class of @{typ \<open>definition\<close>}.
However, in order to avoid the somewhat tedious consequence:
@{boxed_theory_text [display]
\<open>Theorem*[T1::"theorem", short_name="\<open>DF definition captures deadlock-freeness\<close>"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>\<close>}

the choice of the default class can be influenced by setting globally an attribute such as
@{boxed_theory_text [display]
\<open>declare[[ Definition_default_class = "definition"]]
declare[[ Theorem_default_class = "theorem"]]
\<close>}

which allows the above example to be shortened to:
@{boxed_theory_text [display]
\<open>Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>"] \<open>\<open>\<close> \<dots> \<open>\<close>\<close>
\<close>}
\<close>

subsection\<open>The Ontology \<^theory>\<open>Isabelle_DOF.technical_report\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => true (* String.isPrefix "technical_report" l
orelse String.isPrefix "Isa_COL" l *))
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>technical_report\<close> ontology extends \<^verbatim>\<open>scholarly_paper\<close> by concepts needed
for larger reports in the domain of mathematics and engineering. The concepts are fairly
high-level and arranged at the root-class level, as shown in the following class tree
(a small usage sketch is given below the tree):

%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 technical\_report.front\_matter\DTcomment{...}.
.1 technical\_report.table\_of\_contents\DTcomment{...}.
.1 Isa\_COL.text\_element\DTcomment{...}.
.2 scholarly\_paper.text\_section\DTcomment{...}.
.4 technical\_report.code\DTcomment{...}.
.5 technical\_report.SML\DTcomment{...}.
.5 technical\_report.ISAR\DTcomment{...}.
.5 technical\_report.LATEX\DTcomment{...}.
.1 technical\_report.index\DTcomment{...}.
.1 ...
.1 technical\_report.report\DTcomment{...}.
}
\end{minipage}
\end{center}
\<close>
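
text\<open>For orientation, a minimal sketch of a report skeleton using this ontology is shown below.
The theory name and the instance names are purely illustrative, and whether the \<^verbatim>\<open>report\<close>
monitor accepts this particular sequence of elements depends on its accept-clause:

@{boxed_theory_text [display]\<open>
theory My_Report
  imports "Isabelle_DOF.technical_report"
begin
open_monitor*[this::report]
title*[tit::title]\<open>A Technical Report\<close>
chapter*[intro::introduction]\<open>Introduction\<close>
section*[overview::technical]\<open>Overview\<close>
text*[motivation::technical]\<open>...\<close>
close_monitor*[this]
end
\<close>}
\<close>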


subsection\<open>A Domain-Specific Ontology: \<^theory>\<open>Isabelle_DOF.CENELEC_50128\<close>\<close>
(*<*)
ML\<open>val toLaTeX = String.translate (fn c => if c = #"_" then "\\_" else String.implode[c])\<close>
ML\<open>writeln (DOF_core.print_doc_class_tree
@{context} (fn (n,l) => true (* String.isPrefix "technical_report" l
orelse String.isPrefix "Isa_COL" l *))
toLaTeX)\<close>
(*>*)
text\<open> The \<^verbatim>\<open>CENELEC_50128\<close> ontology is an example of a domain-specific ontology. It
is based on \<^verbatim>\<open>technical_report\<close>, since we assume that this kind of format will be most
appropriate for this type of long and tedious document:

%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.0 .
.1 CENELEC\_50128.judgement\DTcomment{...}.
.1 CENELEC\_50128.test\_item\DTcomment{...}.
.2 CENELEC\_50128.test\_case\DTcomment{...}.
.2 CENELEC\_50128.test\_tool\DTcomment{...}.
.2 CENELEC\_50128.test\_result\DTcomment{...}.
.2 CENELEC\_50128.test\_adm\_role\DTcomment{...}.
.2 CENELEC\_50128.test\_environment\DTcomment{...}.
.2 CENELEC\_50128.test\_requirement\DTcomment{...}.
.2 CENELEC\_50128.test\_specification\DTcomment{...}.
.1 CENELEC\_50128.objectives\DTcomment{...}.
.1 CENELEC\_50128.design\_item\DTcomment{...}.
.2 CENELEC\_50128.interface\DTcomment{...}.
.1 CENELEC\_50128.sub\_requirement\DTcomment{...}.
.1 CENELEC\_50128.test\_documentation\DTcomment{...}.
.1 Isa\_COL.text\_element\DTcomment{...}.
.2 CENELEC\_50128.requirement\DTcomment{...}.
.3 CENELEC\_50128.AC\DTcomment{...}.
.4 CENELEC\_50128.EC\DTcomment{...}.
.5 CENELEC\_50128.SRAC\DTcomment{...}.
.3 CENELEC\_50128.TC\DTcomment{...}.
.3 CENELEC\_50128.FnI\DTcomment{...}.
.3 CENELEC\_50128.SIR\DTcomment{...}.
.3 CENELEC\_50128.CoAS\DTcomment{...}.
.3 CENELEC\_50128.HtbC\DTcomment{...}.
.3 CENELEC\_50128.SILA\DTcomment{...}.
.3 CENELEC\_50128.assumption\DTcomment{...}.
.3 CENELEC\_50128.hypothesis\DTcomment{...}.
.4 CENELEC\_50128.security\_hyp\DTcomment{...}.
.3 CENELEC\_50128.safety\_requirement\DTcomment{...}.
.2 CENELEC\_50128.cenelec\_text\DTcomment{...}.
.3 CENELEC\_50128.SWAS\DTcomment{...}.
.3 [...].
.2 scholarly\_paper.text\_section\DTcomment{...}.
.3 scholarly\_paper.technical\DTcomment{...}.
.4 scholarly\_paper.math\_content\DTcomment{...}.
.5 CENELEC\_50128.semi\_formal\_content\DTcomment{...}.
.1 ...
}
\end{minipage}
\end{center}
\<close>

subsubsection\<open>Example: Text Elements with Levels\<close>
(* TODO : Rearrange ontology hierarchies. *)

subsubsection\<open>Examples\<close>
text\<open>
The category ``exported constraint (EC)'' is defined in the file
\<^file>\<open>../../../src/ontologies/CENELEC_50128/CENELEC_50128.thy\<close> as follows:
@@ -480,85 +829,9 @@ can now be defined as follows:
\end{ltx}
\<close>

subsubsection\<open>Example: Assertions\<close>
text\<open>Assertions are a common feature to validate properties of models, presented as a collection
of Isabelle/HOL definitions. They are particularly relevant for highlighting corner cases of a
formal model. For example, assume a definition: \<close>

definition last :: "'a list \<Rightarrow> 'a" where "last S = hd(rev S)"

(* Old stuff using abstract classes.
(*<*)
text*[claim::assertions]\<open>For non-empty lists, our definition yields indeed the last element of a list.\<close>
assert*[claim::assertions] "last[4::int] = 4"
assert*[claim::assertions] "last[1,2,3,4::int] = 4"
(*>*)
*)
text\<open>We want to check the consequences of this definition and can add the following statements:
@{boxed_theory_text [display]\<open>
text*[claim::assertions]\<open>For non-empty lists, our definition yields indeed
the last element of a list.\<close>
assert*[claim1::assertions] "last[4::int] = 4"
assert*[claim2::assertions] "last[1,2,3,4::int] = 4"
\<close>}
\<close>

text\<open>As an \<^boxed_theory_text>\<open>ASSERTION_ALIKES\<close>, the \<^boxed_theory_text>\<open>assertions\<close> class possesses a
\<^boxed_theory_text>\<open>properties\<close> attribute. The \<^boxed_theory_text>\<open>assert*\<close> command evaluates its argument;
if it evaluates to true, the property is added to the property list of the \<^boxed_theory_text>\<open>claim\<close>
text-element. Commands like \<^boxed_theory_text>\<open>Definition*\<close> or \<^boxed_theory_text>\<open>Theorem*\<close> work analogously.\<close>


subsection*["text-elements"::technical]\<open>Annotatable Top-level Text-Elements\<close>
text\<open>
While the default user interface for instantiating document classes via the
\<^boxed_theory_text>\<open>text*\<open> ... \<close>\<close>-command allows access to all features of the document
class, \<^isadof> provides short-hands for certain widely-used concepts such as
\<^boxed_theory_text>\<open>title*\<open> ... \<close>\<close> or \<^boxed_theory_text>\<open>section*\<open> ... \<close>\<close>, \<^eg>:

@{boxed_theory_text [display]\<open>
title*[title::title]\<open>Isabelle/DOF\<close>
subtitle*[subtitle::subtitle]\<open>User and Implementation Manual\<close>
text*[adb:: author, email="\<open>a.brucker@exeter.ac.uk\<close>",
orcid="\<open>0000-0002-6355-1200\<close>", http_site="\<open>https://brucker.ch/\<close>",
affiliation="\<open>University of Exeter, Exeter, UK\<close>"] \<open>Achim D. Brucker\<close>
text*[bu::author, email = "\<open>wolff@lri.fr\<close>",
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
\<close>}

In general, all standard text-elements from the Isabelle document model, such
as \<^theory_text>\<open>chapter\<close>, \<^theory_text>\<open>section\<close>, and \<^theory_text>\<open>text\<close>, have their counterparts in the \<^isadof>
implementation within the family of ontology-aware text-elements,
\<^ie>, they provide a meta-argument list that allows one to state that a text-element
has an identity as a text-object labelled \<open>obj_id\<close>, belongs to a document class
\<open>class_id\<close> that has been defined earlier, and has its class-attributes set to particular
values (which are denotable in Isabelle/HOL mathematical term syntax). An example use of the
\<^theory_text>\<open>figure*\<close> command is given after the syntax diagrams below.
\<^item> \<open>meta_args\<close> :
\<^rail>\<open>(obj_id ('::' class_id) ((attribute '=' term)) * ',')\<close>
\<^item> \<open>rich_meta_args\<close> :
\<^rail>\<open> (obj_id ('::' class_id) ((attribute (('=' | '+=') term)) * ','))\<close>
\<^clearpage>
\<^item> \<open>annotated_text_element\<close> :
\<^rail>\<open>
( ( @@{command "title*"}
| @@{command "subtitle*"}
| @@{command "chapter*"}
| @@{command "section*"} | @@{command "subsection*"}
| @@{command "subsubsection*"} | @@{command "paragraph*"} | @@{command "subparagraph*"}
| @@{command "text*"} | @@{command "figure*"} | @@{command "side_by_side_figure*"}
| @@{command "open_monitor*"} | @@{command "close_monitor*"}
| @@{command "Definition*"} | @@{command "Lemma*"}
)
\<newline>
'[' meta_args ']' '\<open>' text '\<close>'
)
| change_status_command
| inspection_command
\<close>
\<close>
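
text\<open>For instance, a figure can be declared as follows (a sketch: the attribute names
\<^boxed_theory_text>\<open>relative_width\<close> and \<^boxed_theory_text>\<open>src\<close> follow the \<^verbatim>\<open>figure\<close> class of
\<^verbatim>\<open>Isa_COL\<close>; the file path is hypothetical):

@{boxed_theory_text [display]\<open>
figure*[fig_ex::figure, relative_width="70", src="''figures/example-diagram''"]
       \<open>A sample figure with its caption.\<close>
\<close>}
\<close>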


subsubsection\<open>Experts: Defining New Top-Level Commands\<close>
subsubsection\<open>For Isabelle Hackers: Defining New Top-Level Commands\<close>

text\<open>
Defining such new top-level commands requires some Isabelle knowledge as well as
@@ -619,21 +892,10 @@ schemata:
\end{ltx}
\<close>

subsection*["inspections-commands"::technical]\<open>Status and Inspection Commands\<close>
text\<open>
\<^item> \<^isadof> \<open>change_status_command\<close> :
\<^rail>\<open> (@@{command "update_instance*"} '[' rich_meta_args ']')
| (@@{command "declare_reference*"} (obj_id ('::' class_id)))\<close>
\<^item> \<^isadof> \<open>inspection_command\<close> :
\<^rail>\<open> @@{command "print_doc_classes"}
| @@{command "print_doc_items"}
| @@{command "check_doc_global"}\<close>
\<close>
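
text\<open>For example, assuming an already created text-element \<^boxed_theory_text>\<open>claim1\<close> of class
\<^boxed_theory_text>\<open>assertion\<close>, its attributes can be updated and the global document state can be
inspected as follows (a sketch; the attribute value is arbitrary):

@{boxed_theory_text [display]\<open>
update_instance*[claim1::assertion, short_name = "\<open>a revised claim\<close>"]
print_doc_classes
print_doc_items
check_doc_global
\<close>}
\<close>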



subsection*["sec:advanced"::technical]\<open>Advanced ODL Concepts\<close>
subsubsection\<open>Meta-types as Types\<close>
section*["sec:advanced"::technical]\<open>Advanced ODL Concepts\<close>
subsection\<open>Meta-types as Types\<close>

text\<open>
To express the dependencies between text elements to the formal
@@ -663,7 +925,7 @@ text\<open>
\<close>


subsubsection*["sec:monitors"::technical]\<open>ODL Monitors\<close>
subsection*["sec:monitors"::technical]\<open>ODL Monitors\<close>
text\<open>
We call a document class with an accept-clause a \<^emph>\<open>monitor\<close>.\<^bindex>\<open>monitor\<close> Syntactically, an
accept-clause\<^index>\<open>accept-clause\<close> contains a regular expression over class identifiers.
@@ -715,7 +977,7 @@ text\<open>
sections.\<close>
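
text\<open>As an illustration, a monitor class in the style of the \<^verbatim>\<open>scholarly_paper\<close> ontology can
be sketched as follows (the class names and the concrete regular expression are illustrative
only and may differ from the actual ontology sources):

@{boxed_theory_text [display]\<open>
doc_class article =
   style_id :: string  <= "''LNCS''"
   accepts "(title ~~ \<lbrakk>subtitle\<rbrakk> ~~ \<lbrace>author\<rbrace>\<^sup>+ ~~ abstract ~~
             \<lbrace>introduction\<rbrace>\<^sup>+ ~~ \<lbrace>technical || example\<rbrace>\<^sup>+ ~~
             \<lbrace>conclusion\<rbrace>\<^sup>+ ~~ bibliography)"
\<close>}

Documents that open such a monitor via \<^theory_text>\<open>open_monitor*\<close> are checked against this regular
expression until the corresponding \<^theory_text>\<open>close_monitor*\<close> is reached.\<close>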


subsubsection*["sec:class_inv"::technical]\<open>ODL Class Invariants\<close>
subsection*["sec:class_inv"::technical]\<open>ODL Class Invariants\<close>
text\<open>
Ontological classes as described so far are too liberal in many situations. For example, one
would like to express that any instance of a \<^boxed_theory_text>\<open>result\<close> class finally has a
@@ -763,6 +1025,119 @@ fun check_result_inv oid {is_monitor:bool} ctxt =
\<close>



section*[infrastructure::technical]\<open>Technical Infrastructure\<close>

text\<open>
The list of fully supported (\<^ie>, supporting both interactive ontological modeling and
document generation) ontologies and the list of supported document templates can be
obtained by calling \inlinebash|isabelle mkroot_DOF -h| (see \<^technical>\<open>first_project\<close>).
Note that the postfix \inlinebash|-UNSUPPORTED| denotes experimental ontologies or templates
for which further manual setup steps might be required or that are not fully tested. Also note
that the \<^LaTeX>-class files required by the templates need to be already installed on your
system. This is mostly a problem for publisher specific templates (\<^eg>, Springer's
\<^path>\<open>llncs.cls\<close>), which cannot be re-distributed due to copyright restrictions.
\<close>

subsection\<open>Developing Ontologies and their Representation Mappings\<close>
text\<open>
The document core \<^emph>\<open>may\<close>, but \<^emph>\<open>need\<close> not, use Isabelle definitions or proofs for checking the
formal content---this manual is actually an example of a document not containing any proof.
Consequently, the document editing and checking facility provided by \<^isadof> addresses the needs
of common users for an advanced text-editing environment; neither modeling nor proof knowledge is
inherently required.

We expect authors of ontologies to have experience in the use of \<^isadof>, basic modeling (and,
potentially, some basic SML programming) experience, basic \<^LaTeX> knowledge, and, last but not
least, domain knowledge of the ontology to be modeled. Users with experience in UML-like
meta-modeling will feel familiar with most concepts; however, we expect no need for insight into
the Isabelle proof language, for example, or other more advanced concepts.

Technically, ontologies\<^index>\<open>ontology!directory structure\<close> are stored in a directory
\inlinebash|src/ontologies| and consist of an Isabelle theory file and a \<^LaTeX>-style file:
%
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 ontologies\DTcomment{Ontologies}.
.4 ontologies.thy\DTcomment{Ontology Registration}.
.4 scholarly\_paper\DTcomment{scholarly\_paper}.
.5 scholarly\_paper.thy.
.5 DOF-scholarly\_paper.sty.
.4 technical\_report\DTcomment{technical\_report}.
.5 technical\_report.thy.
.5 DOF-technical\_report.sty.
.4 CENELEC\_50128\DTcomment{CENELEC\_50128}.
.5 CENELEC\_50128.thy.
.5 DOF-CENELEC\_50128.sty.
.4 \ldots.
}
\end{minipage}
\end{center}
\<close>
text\<open>
Developing a new ontology ``\inlinebash|foo|'' requires, from a technical perspective, the
following steps:
\<^item> create a new sub-directory \inlinebash|foo| in the directory \inlinebash|src/ontologies|
\<^item> definition of the ontological concepts, using \<^isadof>'s Ontology Definition Language (ODL), in
  a new theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close> (a minimal sketch is given below).
\<^item> definition of the document representation for the ontological concepts in a \<^LaTeX>-style
  file \<^path>\<open>src/ontologies/foo/DOF-foo.sty\<close>
\<^item> registration (as import) of the new ontology in the file
  \<^path>\<open>src/ontologies/ontologies.thy\<close>.
\<^item> activation of the new document setup by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>
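
text\<open>A minimal sketch of such a theory file \<^path>\<open>src/ontologies/foo/foo.thy\<close> could look as
follows; all class and attribute names are hypothetical, and the import may have to be adapted
to the local setup:

@{boxed_theory_text [display]\<open>
theory foo
  imports "Isabelle_DOF.Isa_COL"
begin

doc_class foo_concept =
   status :: string <= "''draft''"

doc_class foo_requirement = foo_concept +
   is_mandatory :: bool <= "True"

end
\<close>}
\<close>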

subsection\<open>Document Templates\<close>
text\<open>
Document-templates\<^index>\<open>document template\<close> define the overall layout (page size, margins, fonts,
etc.) of the generated documents and are the main technical means for implementing layout
requirements that are, \<^eg>, required by publishers or standardization bodies. Document-templates
are stored in a directory
\<^path>\<open>src/document-templates\<close>:\<^index>\<open>document template!directory structure\<close>
\begin{center}
\begin{minipage}{.9\textwidth}
\dirtree{%
.1 .
.2 src.
.3 document-templates\DTcomment{Document templates}.
.4 root-lncs.tex.
.4 root-scrartcl.tex.
.4 root-scrreprt-modern.tex.
.4 root-scrreprt.tex.
}
\end{minipage}
\end{center}
\<close>

text\<open>
Developing a new document template ``\inlinebash|bar|'' requires the following steps:
\<^item> development of a new \<^LaTeX>-template \inlinebash|src/document-templates/root-bar.tex|
\<^item> activation of the new document template by executing the install script. You can skip the lengthy
checks for the AFP entries and the installation of the Isabelle patch by using the
\inlinebash|--skip-patch-and-afp| option:
\begin{bash}
ë\prompt{\isadofdirn}ë ./install --skip-patch-and-afp
\end{bash}
\<close>


text\<open>
As the document generation of \<^isadof> is based
on \<^LaTeX>, the \<^isadof> document templates can (and should) make use of any \<^LaTeX>-classes provided
by publishers or standardization bodies.
\<close>


section*["document-templates"::technical]\<open>Defining Document Templates\<close>
subsection\<open>The Core Template\<close>

@@ -989,8 +1364,6 @@ text\<open>





(*<*)
end
(*>*)


+ 1
- 1
examples/technical_report/Isabelle_DOF-Manual/05_Implementation.thy View File

@@ -22,7 +22,7 @@ chapter*[isadof_developers::text_section]\<open>Extending \<^isadof>\<close>
text\<open>
In this chapter, we describe the basic implementation aspects of \<^isadof>, which is based on
the following design-decisions:
\<^item> the entire \<^isadof> is a ``pure add-on,'' \ie, we deliberately resign on the possibility to
\<^item> the entire \<^isadof> is a ``pure add-on,'' \<^ie>, we deliberately resign on the possibility to
modify Isabelle itself.
\<^item> we made a small exception to this rule: the \<^isadof> package modifies in its installation
about 10 lines in the \LaTeX-generator (\path{src/patches/thy_output.ML}).


+ 28
- 27
examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy View File

@@ -14,9 +14,10 @@
(*<*)
theory TR_MyCommentedIsabelle
imports "Isabelle_DOF.technical_report"

begin

setup \<open> DOF_lib.define_shortcut \<^binding>\<open>isabelle\<close> "Isabelle/HOL"\<close>
define_shortcut* isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>

open_monitor*[this::report]
(*>*)
@@ -181,7 +182,7 @@ ML\<open>
\<close>
(*>*)

text\<open>\<^vs>\<open>-1,0cm\<close>... which we will describe in more detail later. \<close>
text\<open>\<^vs>\<open>-1.0cm\<close>... which we will describe in more detail later. \<close>

text\<open>In a way, anti-quotations implement a kind of
literate specification style in text, models, code, proofs, etc., which become alltogether
@@ -483,28 +484,28 @@ text\<open>Note, furthermore, that there is a programming API for the HOL-instan
operators of the HOL logic specific constructors and destructors:\<close>

text*[T2::technical]\<open>
\<^enum> \<^ML>\<open>HOLogic.boolT : typ\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Trueprop : term -> term\<close>, the embedder of bool to prop fundamental for HOL \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_Trueprop : term -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Trueprop_conv : conv -> conv\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_setT : typ -> typ\<close>, the ML level type constructor set \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_setT : typ -> typ\<close>, the ML level type destructor for set \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Collect_const : typ -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Collect : string * typ * term -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_mem : term * term -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_mem : term -> term * term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_set : typ -> term list -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_intr : Proof.context -> thm -> thm -> thm\<close>, some HOL-level derived-inferences \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elim : Proof.context -> thm -> thm * thm\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elims : Proof.context -> thm -> thm list\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj : term\<close> , some ML level logical constructors \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.disj : term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.imp : term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Not : term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_not : term -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_conj : term * term -> term\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_conj : term -> term list\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conjuncts : term -> term list\<close> \<^vs>\<open>-0,2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.boolT : typ\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Trueprop : term -> term\<close>, the embedder of bool to prop fundamental for HOL \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_Trueprop : term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Trueprop_conv : conv -> conv\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_setT : typ -> typ\<close>, the ML level type constructor set \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_setT : typ -> typ\<close>, the ML level type destructor for set \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Collect_const : typ -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_Collect : string * typ * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_mem : term * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_mem : term -> term * term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_set : typ -> term list -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_intr : Proof.context -> thm -> thm -> thm\<close>, some HOL-level derived-inferences \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elim : Proof.context -> thm -> thm * thm\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj_elims : Proof.context -> thm -> thm list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conj : term\<close> , some ML level logical constructors \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.disj : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.imp : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.Not : term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_not : term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.mk_conj : term * term -> term\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.dest_conj : term -> term list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> \<^ML>\<open>HOLogic.conjuncts : term -> term list\<close> \<^vs>\<open>-0.2cm\<close>
\<^enum> ...
\<close>

@@ -702,7 +703,7 @@ proof - fix a :: nat

subsection*[t233::technical]\<open> Theories and the Signature API\<close>
text\<open>
\<^enum> \<^ML>\<open>Sign.tsig_of : theory -> Type.tsig\<close> extraxts the type-signature of a theory
\<^enum> \<^ML>\<open>Sign.tsig_of : theory -> Type.tsig\<close> extracts the type-signature of a theory
\<^enum> \<^ML>\<open>Sign.syn_of : theory -> Syntax.syntax\<close> extracts the constant-symbol signature
\<^enum> \<^ML>\<open>Sign.of_sort : theory -> typ * sort -> bool\<close> decides that a type belongs to a sort.
\<close>
@@ -2306,8 +2307,8 @@ text\<open> This interactive Isabelle Programming Cook-Book represents my curren
\<close>
(*<*)

paragraph\<open>Many thanks to Frederic Tuong, who contributed some example such as the string cartouche
for Unicode Character Denotations as well as many local hints for improvements.\<close>
paragraph\<open>Many thanks to Frederic Tuong, who contributed some example such as the string
cartouche for Unicode Character Denotations as well as many local hints for improvements.\<close>

section*[bib::bibliography]\<open>Bibliography\<close>



+ 15
- 193
src/DOF/Isa_COL.thy View File

@@ -23,12 +23,11 @@ text\<open> Building a fundamental infrastructure for common document elements s

theory Isa_COL
imports Isa_DOF
keywords "title*" "subtitle*" "chapter*"
"section*" "subsection*" "subsubsection*"
"paragraph*" "subparagraph*" :: document_body
and "figure*" "side_by_side_figure*" :: document_body
and "assert*" :: thy_decl
keywords "title*" "subtitle*"
"chapter*" "section*"
"subsection*" "subsubsection*"
"paragraph*" "subparagraph*"
"figure*" "side_by_side_figure*" :: document_body

begin
@@ -98,7 +97,7 @@ fun transform_cid thy NONE X = X
in if DOF_core.is_subclass_global thy sub_cid_long cid_long
then (SOME (sub_cid,pos))
else (* (SOME (sub_cid,pos)) *)
(* BUG : check reveals problem of Definition* misuse. *)
(* BUG : check reveals problem of Definition* misuse. *)
error("class "^sub_cid_long^
" must be sub-class of "^cid_long)
end
@@ -120,30 +119,6 @@ fun enriched_document_cmd_exp ncid (S: (string * string) list) =
end;
end (* local *)

fun assertion_cmd'((((((oid,pos),cid_pos),doc_attrs),name_opt:string option),modes : string list),
prop) =
let fun conv_2_holstring thy = (bstring_to_holstring (Proof_Context.init_global thy))
fun conv_attrs thy = (("properties",pos),"[@{termrepr ''"^conv_2_holstring thy prop ^" ''}]")
::doc_attrs
fun conv_attrs' thy = map (fn ((lhs,pos),rhs) => (((lhs,pos),"+="),rhs)) (conv_attrs thy)
fun mks thy = case DOF_core.get_object_global_opt oid thy of
SOME NONE => (error("update of declared but not created doc_item:" ^ oid))
| SOME _ => (update_instance_command (((oid,pos),cid_pos),conv_attrs' thy) thy)
| NONE => (create_and_check_docitem
{is_monitor = false} {is_inline = false}
oid pos cid_pos (conv_attrs thy) thy)
val check = (assert_cmd name_opt modes prop) o Proof_Context.init_global
in
(* Toplevel.keep (check o Toplevel.context_of) *)
Toplevel.theory (fn thy => (check thy; mks thy))
end


val _ =
Outer_Syntax.command @{command_keyword "assert*"}
"evaluate and print term"
(attributes -- opt_evaluator -- opt_modes -- Parse.term >> assertion_cmd');


val _ =
Outer_Syntax.command ("title*", @{here}) "section heading"
@@ -251,172 +226,20 @@ val _ =
end
\<close>

section\<open>Shortcuts, Macros, Environments\<close>
text\<open>The features described in this section are actually \<^emph>\<open>not\<close> real ISADOF features, rather a
slightly more abstract layer over somewhat buried standard features of the Isabelle document
generator ... (Thanks to Makarius) Conceptually, they are \<^emph>\<open>sub-text-elements\<close>. \<close>

text\<open>This module provides mechanisms to define front-end checked:
\<^enum> \<^emph>\<open>shortcuts\<close>, i.e. machine-checked abbreviations without arguments
that were mapped to user-defined LaTeX code (Example: \<^verbatim>\<open>\ie\<close>)
\<^enum> \<^emph>\<open>macros\<close> with one argument that were mapped to user-defined code. Example: \<^verbatim>\<open>\myurl{bla}\<close>.
The argument can be potentially checked and reports can be sent to PIDE;
if no such checking is desired, this can be expressed by setting the
\<^theory_text>\<open>reportNtest\<close>-parameter to \<^theory_text>\<open>K(K())\<close>.
\<^enum> \<^emph>\<open>macros\<close> with two arguments, potentially independently checked. See above.
Example: \<^verbatim>\<open>\myurl[ding]{dong}\<close>,
\<^enum> \<^emph>\<open>boxes\<close> which are more complex sub-text-elements in the line of the \<^verbatim>\<open>verbatim\<close> or
\<^verbatim>\<open>theory_text\<close> environments.

Note that we deliberately refrained from a code-template definition mechanism for simplicity,
so the patterns were just described by strings. No additional ado with quoting/unquoting
mechanisms ...
\<close>

ML\<open>
structure DOF_lib =
struct
fun define_shortcut name latexshcut =
Thy_Output.antiquotation_raw name (Scan.succeed ())
(fn _ => fn () => Latex.string latexshcut)

(* This is a generalization of the Isabelle2020 function "control_antiquotation" from
document_antiquotations.ML. (Thanks Makarius!) *)
fun define_macro name s1 s2 reportNtest =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.cartouche_input)
(fn ctxt =>
fn src => let val () = reportNtest ctxt src
in src |> Latex.enclose_block s1 s2
o Thy_Output.output_document ctxt {markdown = false}
end);

local (* hide away really strange local construction *)
fun enclose_body2 front body1 middle body2 post =
(if front = "" then [] else [Latex.string front]) @ body1 @
(if middle = "" then [] else [Latex.string middle]) @ body2 @
(if post = "" then [] else [Latex.string post]);
in
fun define_macro2 name front middle post reportNtest1 reportNtest2 =
Thy_Output.antiquotation_raw_embedded name (Scan.lift ( Args.cartouche_input
-- Args.cartouche_input))
(fn ctxt =>
fn (src1,src2) => let val () = reportNtest1 ctxt src1
val () = reportNtest2 ctxt src2
val T1 = Thy_Output.output_document ctxt {markdown = false} src1
val T2 = Thy_Output.output_document ctxt {markdown = false} src2
in Latex.block(enclose_body2 front T1 middle T2 post)
end);
end

fun report_text ctxt text =
let val pos = Input.pos_of text in
Context_Position.reports ctxt
[(pos, Markup.language_text (Input.is_delimited text)),
(pos, Markup.raw_text)]
end;

fun report_theory_text ctxt text =
let val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in () end

fun prepare_text ctxt =
Input.source_content #> #1 #> Document_Antiquotation.prepare_lines ctxt;
(* This also produces indent-expansion and changes space to "\_" and the introduction of "\newline",
I believe. Otherwise its in Thy_Output.output_source, the compiler from string to LaTeX.text. *)

fun string_2_text_antiquotation ctxt text =
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt

fun string_2_theory_text_antiquotation ctxt text =
let
val keywords = Thy_Header.get_keywords' ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
end

fun gen_text_antiquotation name reportNcheck compile =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text:Input.source =>
let
val _ = reportNcheck ctxt text;
in
compile ctxt text
end);

fun std_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_text string_2_text_antiquotation

(* should be the same as (2020):
fun text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val _ = report_text ctxt text;
in
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt
end);
*)
(*<*)
(*
ML\<open>ML_Context.expression\<close>
fun setup source =
ML_Context.expression (Input.pos_of source)
(ML_Lex.read "Theory.setup (" @ ML_Lex.read_source source @ ML_Lex.read ")")
|> Context.theory_map;
setup\<open>\<close>

fun std_theory_text_antiquotation name (* redefined in these more abstract terms *) =
gen_text_antiquotation name report_theory_text string_2_theory_text_antiquotation

(* should be the same as (2020):
fun theory_text_antiquotation name =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text =>
let
val keywords = Thy_Header.get_keywords' ctxt;

val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
|> enclose_env ctxt "isarbox"
end);
*)


fun environment_delim name =
("%\n\\begin{" ^ Latex.output_name name ^ "}\n",
"\n\\end{" ^ Latex.output_name name ^ "}");

fun environment_block name = environment_delim name |-> Latex.enclose_body #> Latex.block;


fun enclose_env verbatim ctxt block_env body =
if Config.get ctxt Document_Antiquotation.thy_output_display
then if verbatim
then environment_block block_env [body]
else Latex.environment_block block_env [body]
else Latex.block ([Latex.string ("\\inline"^block_env ^"{")] @ [body] @ [ Latex.string ("}")]);

end
\<close>

(*>*)

section\<open>Tables\<close>
(* TODO ! ! ! *)

(* dito the future monitor: table - block *)


@@ -426,5 +249,4 @@ ML\<open>@{term "side_by_side_figure"};
@{typ "doc_class rexp"};
DOF_core.SPY;\<close>


end

+ 222
- 53
src/DOF/Isa_DOF.thy View File

@@ -33,19 +33,18 @@ theory Isa_DOF (* Isabelle Document Ontology Framework *)
RegExpInterface (* Interface to functional regular automata for monitoring *)
Assert
keywords "+=" ":=" "accepts" "rejects" "invariant"
keywords "+=" ":=" "accepts" "rejects" "invariant"
and "open_monitor*" "close_monitor*" "declare_reference*"
"update_instance*" "doc_class" ::thy_decl
and "open_monitor*" "close_monitor*"
"declare_reference*" "update_instance*"
"doc_class"
"define_shortcut*" "define_macro*" :: thy_decl

and "text*" "text-macro*" :: document_body
and "text*" "text-macro*" :: document_body

and "print_doc_classes" "print_doc_items"
and "print_doc_classes" "print_doc_items"
"print_doc_class_template" "check_doc_global" :: diag

(* experimental *)
and "corrollary*" "proposition*" "lemma*" "theorem*" :: thy_decl
(* -- intended replacement of Isar std commands.*)

@@ -674,6 +673,21 @@ fun print_doc_classes b ctxt =
writeln "=====================================\n\n\n"
end;

fun print_doc_class_tree ctxt P T =
let val {docobj_tab={tab = x, ...},docclass_tab, ...} = get_data ctxt;
val class_tab:(string * docclass_struct)list = (Symtab.dest docclass_tab)
fun is_class_son X (n, dc:docclass_struct) = (X = #inherits_from dc)
fun tree lev ([]:(string * docclass_struct)list) = ""
|tree lev ((n,R)::S) = (if P(lev,n)
then "."^Int.toString lev^" "^(T n)^"{...}.\n"
^ (tree(lev + 1)(filter(is_class_son(SOME([],n)))class_tab))
else "."^Int.toString lev^" ... \n")
^ (tree lev S)
val roots = filter(is_class_son NONE) class_tab
in ".0 .\n" ^ tree 1 roots end



fun check_doc_global (strict_checking : bool) ctxt =
let val {docobj_tab={tab = x, ...}, monitor_tab, ...} = get_data ctxt;
val S = map_filter (fn (s,NONE) => SOME s | _ => NONE) (Symtab.dest x)
@@ -909,8 +923,8 @@ fun ML_isa_check_docitem thy (term, req_ty, pos) =
| _ => error("can not infer type for: "^ name)
in if cid <> DOF_core.default_cid
andalso not(DOF_core.is_subclass ctxt cid req_class)
then error("reference ontologically inconsistent: "^
Position.here pos_decl)
then error("reference ontologically inconsistent: "
^cid^" vs. "^req_class^ Position.here pos_decl)
else ()
end
else err ("faulty reference to docitem: "^name) pos
@@ -1473,44 +1487,6 @@ val _ = Thy_Output.set_meta_args_parser



ML \<open>
local (* dull and dangerous copy from Pure.thy given that these functions are not
globally exported. *)

val long_keyword =
Parse_Spec.includes >> K "" ||
Parse_Spec.long_statement_keyword;

val long_statement =
Scan.optional (Parse_Spec.opt_thm_name ":" --| Scan.ahead long_keyword) Binding.empty_atts --
Scan.optional Parse_Spec.includes [] -- Parse_Spec.long_statement
>> (fn ((binding, includes), (elems, concl)) => (true, binding, includes, elems, concl));

val short_statement =
Parse_Spec.statement -- Parse_Spec.if_statement -- Parse.for_fixes
>> (fn ((shows, assumes), fixes) =>
(false, Binding.empty_atts, [], [Element.Fixes fixes, Element.Assumes assumes],
Element.Shows shows));

fun theorem spec schematic descr =
Outer_Syntax.local_theory_to_proof' spec ("state " ^ descr)
((ODL_Command_Parser.attributes -- (long_statement || short_statement))
>> (fn (_ (* skip *) ,(long, binding, includes, elems, concl)) =>
((if schematic then Specification.schematic_theorem_cmd
else Specification.theorem_cmd )
long Thm.theoremK NONE (K I) binding includes elems concl)));

in

(* Half - fake. activates original Isar commands, but skips meta-arguments for the moment. *)
(* tendance deprecated - see new scholarly paper setup. *)
val _ = theorem @{command_keyword "theorem*"} false "theorem";
val _ = theorem @{command_keyword "lemma*"} false "lemma";
val _ = theorem @{command_keyword "corrollary*"} false "corollary";
val _ = theorem @{command_keyword "proposition*"} false "proposition";

end\<close>

section\<open> Syntax for Ontological Antiquotations (the '' View'' Part II) \<close>

@@ -1533,9 +1509,9 @@ fun check_and_mark ctxt cid_decl (str:{strict_checking: bool}) {inline=inline_re
val markup = docref_markup false name id pos_decl;
val _ = Context_Position.report ctxt pos markup;
(* this sends a report for a ref application to the PIDE interface ... *)
val _ = if cid <> DOF_core.default_cid
andalso not(DOF_core.is_subclass ctxt cid cid_decl)
then error("reference ontologically inconsistent:" ^ Position.here pos_decl)
val _ = if not(DOF_core.is_subclass ctxt cid cid_decl)
then error("reference ontologically inconsistent: "^cid
^" must be subclass of "^cid_decl^ Position.here pos_decl)
else ()
in () end
else if DOF_core.is_declared_oid_global name thy
@@ -1617,7 +1593,6 @@ val _ = Theory.setup
end (* struct *)
\<close>

text\<open> @{thm [] refl}\<close>

ML\<open>
structure AttributeAccess =
@@ -1848,7 +1823,201 @@ val _ =
end (* struct *)
\<close>

text\<open>dfgd\<close>


section\<open>Shortcuts, Macros, Environments\<close>
text\<open>The features described in this section are actually \<^emph>\<open>not\<close> real ISADOF features, rather a
slightly more abstract layer over somewhat buried standard features of the Isabelle document
generator ... (Thanks to Makarius) Conceptually, they are \<^emph>\<open>sub-text-elements\<close>. \<close>

text\<open>This module provides mechanisms to define front-end checked:
\<^enum> \<^emph>\<open>shortcuts\<close>, i.e. machine-checked abbreviations without arguments
that were mapped to user-defined LaTeX code (Example: \<^verbatim>\<open>\ie\<close>)
\<^enum> \<^emph>\<open>macros\<close> with one argument that were mapped to user-defined code. Example: \<^verbatim>\<open>\myurl{bla}\<close>.
The argument can be potentially checked and reports can be sent to PIDE;
if no such checking is desired, this can be expressed by setting the
\<^theory_text>\<open>reportNtest\<close>-parameter to \<^theory_text>\<open>K(K())\<close>.
\<^enum> \<^emph>\<open>macros\<close> with two arguments, potentially independently checked. See above.
Example: \<^verbatim>\<open>\myurl[ding]{dong}\<close>,
\<^enum> \<^emph>\<open>boxes\<close> which are more complex sub-text-elements in the line of the \<^verbatim>\<open>verbatim\<close> or
\<^verbatim>\<open>theory_text\<close> environments.

Note that we deliberately refrained from a code-template definition mechanism for simplicity,
so the patterns were just described by strings. No additional ado with quoting/unquoting
mechanisms ...
\<close>

ML\<open>
structure DOF_lib =
struct
fun define_shortcut name latexshcut =
Thy_Output.antiquotation_raw name (Scan.succeed ())
(fn _ => fn () => Latex.string latexshcut)

(* This is a generalization of the Isabelle2020 function "control_antiquotation" from
document_antiquotations.ML. (Thanks Makarius!) *)
fun define_macro name s1 s2 reportNtest =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.cartouche_input)
(fn ctxt =>
fn src => let val () = reportNtest ctxt src
in src |> Latex.enclose_block s1 s2
o Thy_Output.output_document ctxt {markdown = false}
end);

local (* hide away really strange local construction *)
fun enclose_body2 front body1 middle body2 post =
(if front = "" then [] else [Latex.string front]) @ body1 @
(if middle = "" then [] else [Latex.string middle]) @ body2 @
(if post = "" then [] else [Latex.string post]);
in
fun define_macro2 name front middle post reportNtest1 reportNtest2 =
Thy_Output.antiquotation_raw_embedded name (Scan.lift ( Args.cartouche_input
-- Args.cartouche_input))
(fn ctxt =>
fn (src1,src2) => let val () = reportNtest1 ctxt src1
val () = reportNtest2 ctxt src2
val T1 = Thy_Output.output_document ctxt {markdown = false} src1
val T2 = Thy_Output.output_document ctxt {markdown = false} src2
in Latex.block(enclose_body2 front T1 middle T2 post)
end);
end

fun report_text ctxt text =
let val pos = Input.pos_of text in
Context_Position.reports ctxt
[(pos, Markup.language_text (Input.is_delimited text)),
(pos, Markup.raw_text)]
end;

fun report_theory_text ctxt text =
let val keywords = Thy_Header.get_keywords' ctxt;
val _ = report_text ctxt text;
val _ =
Input.source_explode text
|> Token.tokenize keywords {strict = true}
|> maps (Token.reports keywords)
|> Context_Position.reports_text ctxt;
in () end

fun prepare_text ctxt =
Input.source_content #> #1 #> Document_Antiquotation.prepare_lines ctxt;
(* This also produces indent-expansion and changes space to "\_" and the introduction of "\newline",
I believe. Otherwise its in Thy_Output.output_source, the compiler from string to LaTeX.text. *)

fun string_2_text_antiquotation ctxt text =
prepare_text ctxt text
|> Thy_Output.output_source ctxt
|> Thy_Output.isabelle ctxt

fun string_2_theory_text_antiquotation ctxt text =
let
val keywords = Thy_Header.get_keywords' ctxt;
in
prepare_text ctxt text
|> Token.explode0 keywords
|> maps (Thy_Output.output_token ctxt)
|> Thy_Output.isabelle ctxt
end

fun gen_text_antiquotation name reportNcheck compile =
Thy_Output.antiquotation_raw_embedded name (Scan.lift Args.text_input)
(fn ctxt => fn text:Input.source =>
let