Merge branch 'main' into Isabelle_dev
|
@ -83,18 +83,18 @@ build_and_install_manuals()
|
|||
if [ "$DIRTY" = "true" ]; then
|
||||
if [ -z ${ARTIFACT_DIR+x} ]; then
|
||||
echo " * Quick and Dirty Mode (local build)"
|
||||
$ISABELLE build -d . Isabelle_DOF Isabelle_DOF-Example-Scholarly_Paper
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF-Example-Scholarly_Paper/output/
|
||||
cp Isabelle_DOF-Example-Scholarly_Paper/output/document.pdf \
|
||||
$ISADOF_WORK_DIR/Isabelle_DOF-Example-Scholarly_Paper/output/
|
||||
$ISABELLE build -d . Isabelle_DOF Isabelle_DOF-Example-I
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF-Example-I/output/
|
||||
cp Isabelle_DOF-Example-I/output/document.pdf \
|
||||
$ISADOF_WORK_DIR/Isabelle_DOF-Example-I/output/
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF/output/
|
||||
cp Isabelle_DOF/output/document.pdf \
|
||||
$ISADOF_WORK_DIR/Isabelle_DOF/output/;
|
||||
else
|
||||
echo " * Quick and Dirty Mode (running on CI)"
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF-Example-Scholarly_Paper/output/
|
||||
cp $ARTIFACT_DIR/browser_info/Unsorted/Isabelle_DOF-Example-Scholarly_Paper/document.pdf \
|
||||
$ISADOF_WORK_DIR/Isabelle_DOF-Example-Scholarly_Paper/output/
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF-Example-I/output/
|
||||
cp $ARTIFACT_DIR/browser_info/Unsorted/Isabelle_DOF-Example-I/document.pdf \
|
||||
$ISADOF_WORK_DIR/Isabelle_DOF-Example-I/output/
|
||||
|
||||
mkdir -p $ISADOF_WORK_DIR/Isabelle_DOF/output/
|
||||
cp $ARTIFACT_DIR/browser_info/Unsorted/Isabelle_DOF/document.pdf \
|
||||
|
@ -111,9 +111,9 @@ build_and_install_manuals()
|
|||
$ISADOF_WORK_DIR/doc/Isabelle_DOF-Manual.pdf
|
||||
echo " Isabelle_DOF-Manual User and Implementation Manual for Isabelle/DOF" >> $ISADOF_WORK_DIR/doc/Contents
|
||||
|
||||
cp $ISADOF_WORK_DIR/Isabelle_DOF-Example-Scholarly_Paper/output/document.pdf \
|
||||
$ISADOF_WORK_DIR/doc/Isabelle_DOF-Example-Scholarly_Paper.pdf
|
||||
echo " Isabelle_DOF-Example-Scholarly_Paper Example academic paper" >> $ISADOF_WORK_DIR/doc/Contents
|
||||
cp $ISADOF_WORK_DIR/Isabelle_DOF-Example-I/output/document.pdf \
|
||||
$ISADOF_WORK_DIR/doc/Isabelle_DOF-Example-I.pdf
|
||||
echo " Isabelle_DOF-Example-I Example academic paper" >> $ISADOF_WORK_DIR/doc/Contents
|
||||
|
||||
find $ISADOF_WORK_DIR -type d -name "output" -exec rm -rf {} \; &> /dev/null || true
|
||||
rm -rf $ISADOF_WORK_DIR/.git* $ISADOF_WORK_DIR/.woodpecker $ISADOF_WORK_DIR/.afp
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
2020-iFM-CSP
|
|
@ -1,4 +1,4 @@
|
|||
session "Isabelle_DOF-Example-Scholarly_Paper" (AFP) = "Isabelle_DOF" +
|
||||
session "Isabelle_DOF-Example-I" (AFP) = "Isabelle_DOF" +
|
||||
options [document = pdf, document_output = "output", document_build = dof]
|
||||
theories
|
||||
IsaDofApplications
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
Before Width: | Height: | Size: 23 KiB After Width: | Height: | Size: 23 KiB |
Before Width: | Height: | Size: 85 KiB After Width: | Height: | Size: 85 KiB |
Before Width: | Height: | Size: 16 KiB After Width: | Height: | Size: 16 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
Before Width: | Height: | Size: 75 KiB After Width: | Height: | Size: 75 KiB |
Before Width: | Height: | Size: 96 KiB After Width: | Height: | Size: 96 KiB |
Before Width: | Height: | Size: 57 KiB After Width: | Height: | Size: 57 KiB |
Before Width: | Height: | Size: 67 KiB After Width: | Height: | Size: 67 KiB |
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 50 KiB |
|
@ -1,4 +1,4 @@
|
|||
session "2020-iFM-csp" = "Isabelle_DOF" +
|
||||
session "Isabelle_DOF-Example-II" = "Isabelle_DOF" +
|
||||
options [document = pdf, document_output = "output", document_build = dof]
|
||||
theories
|
||||
"paper"
|
|
@ -12,10 +12,13 @@ declare[[ strict_monitor_checking = false]]
|
|||
declare[[ Definition_default_class = "definition"]]
|
||||
declare[[ Lemma_default_class = "lemma"]]
|
||||
declare[[ Theorem_default_class = "theorem"]]
|
||||
declare[[ Corollary_default_class = "corollary"]]
|
||||
|
||||
define_shortcut* csp \<rightleftharpoons> \<open>CSP\<close>
|
||||
holcsp \<rightleftharpoons> \<open>HOL-CSP\<close>
|
||||
isabelle \<rightleftharpoons> \<open>Isabelle/HOL\<close>
|
||||
hfill \<rightleftharpoons> \<open>\hfill\<close>
|
||||
br \<rightleftharpoons> \<open>\break\<close>
|
||||
|
||||
(*>*)
|
||||
|
||||
|
@ -27,7 +30,7 @@ author*[lina,email="\<open>lina.ye@lri.fr\<close>",affiliation="\<open>LRI, Inri
|
|||
|
||||
abstract*[abs, keywordlist="[\<open>Shallow Embedding\<close>,\<open>Process-Algebra\<close>,
|
||||
\<open>Concurrency\<close>,\<open>Computational Models\<close>]"]
|
||||
\<open> The theory of Communicating Sequential Processes going back to Hoare and Roscoe is still today
|
||||
\<open> The theory of Communicating Sequential Processes going back to Hoare and Roscoe is still today
|
||||
one of the reference theories for concurrent specification and computing. In 1997, a first
|
||||
formalization in \<^isabelle> of the denotational semantics of the Failure/Divergence Model of
|
||||
\<^csp> was undertaken; in particular, this model can cope with infinite alphabets, in contrast
|
||||
|
@ -123,24 +126,10 @@ attempt to formalize denotational \<^csp> semantics covering a part of Bill Rosc
|
|||
\<^url>\<open>https://gitlri.lri.fr/burkhart.wolff/hol-csp2.0\<close>. In this paper, all Isabelle proofs are
|
||||
omitted.\<close>}.
|
||||
\<close>
|
||||
(*
|
||||
% Moreover, decomposition rules of the form:
|
||||
% \begin{center}
|
||||
% \begin{minipage}[c]{10cm}
|
||||
% @{cartouche [display] \<open>C \<Longrightarrow> A \<sqsubseteq>\<^sub>F\<^sub>D A' \<Longrightarrow> B \<sqsubseteq>\<^sub>F\<^sub>D B' \<Longrightarrow> A \<lbrakk>S\<rbrakk> B \<sqsubseteq>\<^sub>F\<^sub>D A' \<lbrakk>S\<rbrakk> B'\<close>}
|
||||
% \end{minipage}
|
||||
% \end{center}
|
||||
% are of particular interest since they allow to avoid the costly automata-product construction
|
||||
% of model-checkers and to separate infinite sub-systems from finite (model-checkable) ones; however,
|
||||
% their side-conditions \<open>C\<close> are particularly tricky to work out. Decomposition rules may pave the
|
||||
% way for future tool combinations for model-checkers such as FDR4~@{cite "fdr4"} or
|
||||
% PAT~@{cite "SunLDP09"} based on proof certifications.*)
|
||||
|
||||
section*["pre"::tc,main_author="Some(@{docitem \<open>bu\<close>}::author)"]
|
||||
\<open>Preliminaries\<close>
|
||||
|
||||
text\<open>\<close>
|
||||
|
||||
subsection*[cspsemantics::tc, main_author="Some(@{docitem ''bu''})"]\<open>Denotational \<^csp> Semantics\<close>
|
||||
|
||||
text\<open> The denotational semantics (following @{cite "roscoe:csp:1998"}) comes in three layers:
|
||||
|
@ -183,7 +172,6 @@ The following process \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> is an infinite pr
|
|||
many times. However, using the \<^csp> hiding operator \<open>_\_\<close>, this activity is concealed:
|
||||
|
||||
\<^enum> \<open>P\<^sub>i\<^sub>n\<^sub>f = (\<mu> X. a \<rightarrow> X) \ {a}\<close>
|
||||
|
||||
\<close>
|
||||
|
||||
text\<open>where \<open>P\<^sub>i\<^sub>n\<^sub>f\<close> will be equivalent to \<open>\<bottom>\<close> in the process cpo ordering.
|
||||
|
@ -214,7 +202,6 @@ in the plethora of work done and has been a key factor for the success of the Ar
|
|||
For the work presented here, one relevant construction is :
|
||||
|
||||
\<^item> \<^theory_text>\<open>typedef (\<alpha>\<^sub>1,...,\<alpha>\<^sub>n)t = E\<close>
|
||||
|
||||
|
||||
It creates a fresh type that is isomorphic to a set \<open>E\<close> involving \<open>\<alpha>\<^sub>1,...,\<alpha>\<^sub>n\<close> types.
|
||||
Isabelle/HOL performs a number of syntactic checks for these constructions that guarantee the logical
|
||||
|
@ -263,7 +250,6 @@ Informally, these are:
|
|||
|
||||
More formally, a process \<open>P\<close> of the type \<open>\<Sigma> process\<close> should have the following properties:
|
||||
|
||||
|
||||
@{cartouche [display] \<open>([],{}) \<in> \<F> P \<and>
|
||||
(\<forall> s X. (s,X) \<in> \<F> P \<longrightarrow> front_tickFree s) \<and>
|
||||
(\<forall> s t . (s@t,{}) \<in> \<F> P \<longrightarrow> (s,{}) \<in> \<F> P) \<and>
|
||||
|
@ -281,8 +267,8 @@ Forth, we turn our wishlist of "axioms" above into the definition of a predicate
|
|||
of type \<open>\<Sigma> process\<^sub>0 \<Rightarrow> bool\<close> deciding if its conditions are fulfilled. Since \<open>P\<close> is a pre-process,
|
||||
we replace \<open>\<F>\<close> by \<open>fst\<close> and \<open>\<D>\<close> by \<open>snd\<close> (the HOL projections into a pair).
|
||||
And last not least fifth, we use the following type definition:
|
||||
\<^item> \<^theory_text>\<open>typedef '\<alpha> process = "{P :: '\<alpha> process\<^sub>0 . is_process P}"\<close>
|
||||
|
||||
\<^item> \<^theory_text>\<open>typedef '\<alpha> process = "{P :: '\<alpha> process\<^sub>0 . is_process P}"\<close>
|
||||
|
||||
Isabelle requires a proof for the existence of a witness for this set,
|
||||
but this can be constructed in a straight-forward manner. Suitable definitions for
|
||||
|
@ -303,11 +289,12 @@ For example, we define \<open>_\<sqinter>_\<close> on the pre-process type as fo
|
|||
|
||||
\<^item> \<^theory_text>\<open>definition "P \<sqinter> Q \<equiv> Abs_process(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)"\<close>
|
||||
|
||||
where \<open>\<F> = fst \<circ> Rep_process\<close> and \<open>\<D> = snd \<circ> Rep_process\<close> and where \<open>Rep_process\<close> and
|
||||
\<open>Abs_process\<close> are the representation and abstraction morphisms resulting from the
|
||||
type definition linking \<open>'\<alpha> process\<close> isomorphically to \<open>'\<alpha> process\<^sub>0\<close>. Proving the above properties
|
||||
for \<open>\<F> (P \<sqinter> Q)\<close> and \<open>\<D> (P \<sqinter> Q)\<close> requires a proof that \<open>(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)\<close>
|
||||
satisfies the 9 "axioms", which is fairly simple in this case.
|
||||
where \<open>Rep_process\<close> and \<open>Abs_process\<close> are the representation and abstraction morphisms resulting
|
||||
from the type definition linking the type \<open>'\<alpha> process\<close> isomorphically to the set \<open>'\<alpha> process\<^sub>0\<close>.
|
||||
The projection into \<^emph>\<open>failures\<close> is defined by \<open>\<F> = fst \<circ> Rep_process\<close>, whereas the
|
||||
\<^emph>\<open>divergences\<close> are defined bz \<open>\<D> = snd \<circ> Rep_process\<close>. Proving the above properties for
|
||||
\<open>\<F> (P \<sqinter> Q)\<close> and \<open>\<D> (P \<sqinter> Q)\<close> requires a proof that \<open>(\<F> P \<union> \<F> Q , \<D> P \<union> \<D> Q)\<close>
|
||||
satisfies the well-formedness conditions of \<open>is_process\<close>, which is fairly simple in this case.
|
||||
|
||||
The definitional presentation of the \<^csp> process operators according to @{cite "roscoe:csp:1998"}
|
||||
follows always this scheme. This part of the theory comprises around 2000 loc.
|
||||
|
@ -322,15 +309,16 @@ a conversion of processes in terms of (finite) labelled transition systems leads
|
|||
model-checking techniques based on graph-exploration. Essentially, a process \<open>P\<close> \<^emph>\<open>refines\<close>
|
||||
another process \<open>Q\<close> if and only if it is more deterministic and more defined (has less divergences).
|
||||
Consequently, each of the three semantics models (trace, failure and failure/divergence)
|
||||
has its corresponding refinement orderings.
|
||||
has its corresponding refinement orderings.\<close>
|
||||
Theorem*[th1::"theorem", short_name="\<open>Refinement properties\<close>"]\<open>
|
||||
What we are interested in this paper is the following refinement orderings for the
|
||||
failure/divergence model.
|
||||
|
||||
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> Q \<equiv> \<F> P \<supseteq> \<F> Q \<and> \<D> P \<supseteq> \<D> Q\<close>
|
||||
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> Q \<equiv> \<T> P \<supseteq> \<T> Q \<and> \<D> P \<supseteq> \<D> Q\<close>
|
||||
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<FF> Q \<equiv> \<FF> P \<supseteq> \<FF> Q, \<FF>\<in>{\<T>,\<F>,\<D>}\<close>
|
||||
\<^enum> \<open>P \<sqsubseteq>\<^sub>\<FF> Q \<equiv> \<FF> P \<supseteq> \<FF> Q, \<FF>\<in>{\<T>,\<F>,\<D>}\<close> \<close>
|
||||
|
||||
Notice that in the \<^csp> literature, only \<open>\<sqsubseteq>\<^sub>\<F>\<^sub>\<D>\<close> is well studied for failure/divergence model.
|
||||
text\<open> Notice that in the \<^csp> literature, only \<open>\<sqsubseteq>\<^sub>\<F>\<^sub>\<D>\<close> is well studied for failure/divergence model.
|
||||
Our formal analysis of different granularities on the refinement orderings
|
||||
allows deeper understanding of the same semantics model. For example, \<open>\<sqsubseteq>\<^sub>\<T>\<^sub>\<D>\<close> turns
|
||||
out to have in some cases better monotonicity properties and therefore allow for stronger proof
|
||||
|
@ -356,10 +344,9 @@ is based on the concept \<^emph>\<open>refusals after\<close> a trace \<open>s\<
|
|||
|
||||
Definition*[process_ordering, level= "Some 2", short_name="''process ordering''"]\<open>
|
||||
We define \<open>P \<sqsubseteq> Q \<equiv> \<psi>\<^sub>\<D> \<and> \<psi>\<^sub>\<R> \<and> \<psi>\<^sub>\<M> \<close>, where
|
||||
\<^enum> \<open>\<psi>\<^sub>\<D> = \<D> P \<supseteq> \<D> Q \<close>
|
||||
\<^enum> \<open>\<psi>\<^sub>\<D> = \<D> P \<supseteq> \<D> Q \<close>
|
||||
\<^enum> \<open>\<psi>\<^sub>\<R> = s \<notin> \<D> P \<Rightarrow> \<R> P s = \<R> Q s\<close>
|
||||
\<^enum> \<open>\<psi>\<^sub>\<M> = Mins(\<D> P) \<subseteq> \<T> Q \<close>
|
||||
\<close>
|
||||
\<^enum> \<open>\<psi>\<^sub>\<M> = Mins(\<D> P) \<subseteq> \<T> Q \<close> \<close>
|
||||
|
||||
text\<open>The third condition \<open>\<psi>\<^sub>\<M>\<close> implies that the set of minimal divergent traces
|
||||
(ones with no proper prefix that is also a divergence) in \<open>P\<close>, denoted by \<open>Mins(\<D> P)\<close>,
|
||||
|
@ -395,44 +382,45 @@ For most \<^csp> operators \<open>\<otimes>\<close> we derived rules of the form
|
|||
|
||||
These rules allow to automatically infer for any process term if it is continuous or not.
|
||||
The port of HOL-CSP 2 on HOLCF implied that the derivation of the entire continuity rules
|
||||
had to be completely re-done (3000 loc).
|
||||
|
||||
|
||||
HOL-CSP provides an important proof principle, the fixed-point induction:
|
||||
had to be completely re-done (3000 loc).\<close>
|
||||
|
||||
Theorem*[th2,short_name="\<open>Fixpoint Induction\<close>"]
|
||||
\<open>HOL-CSP provides an important proof principle, the fixed-point induction:
|
||||
@{cartouche [display, indent=5] \<open>cont f \<Longrightarrow> adm P \<Longrightarrow> P \<bottom> \<Longrightarrow> (\<And>X. P X \<Longrightarrow> P(f X)) \<Longrightarrow> P(\<mu>X. f X)\<close>}
|
||||
\<close>
|
||||
|
||||
Fixed-point induction requires a small side-calculus for establishing the admissibility
|
||||
text\<open>Fixed-point induction of @{theorem th2} requires a small side-calculus for establishing the admissibility
|
||||
of a predicate; basically, predicates are admissible if they are valid for any least upper bound
|
||||
of a chain \<open>x\<^sub>1 \<sqsubseteq> x\<^sub>2 \<sqsubseteq> x\<^sub>3 ... \<close> provided that \<open>\<forall>i. P(x\<^sub>i)\<close>. It turns out that \<open>_\<sqsubseteq>_\<close> and \<open>_\<sqsubseteq>\<^sub>F\<^sub>D_\<close> as
|
||||
well as all other refinement orderings that we introduce in this paper are admissible.
|
||||
Fixed-point inductions are the main proof weapon in verifications,
|
||||
together with monotonicities and the \<^csp> laws. Denotational arguments can be hidden as they are not
|
||||
needed in practical verifications. \<close>
|
||||
Fixed-point inductions are the main proof weapon in verifications, together with monotonicities
|
||||
and the \<^csp> laws. Denotational arguments can be hidden as they are not needed in practical
|
||||
verifications. \<close>
|
||||
|
||||
subsection*["law"::tc, main_author="Some(@{docitem ''lina''})"]
|
||||
\<open>\<^csp> Rules: Improved Proofs and New Results\<close>
|
||||
|
||||
|
||||
text\<open> The \<^csp> operators enjoy a number of algebraic properties: commutativity,
|
||||
text\<open>The \<^csp> operators enjoy a number of algebraic properties: commutativity,
|
||||
associativities, and idempotence in some cases. Moreover, there is a rich body of distribution
|
||||
laws between these operators. Our new version HOL-CSP 2 not only shortens and restructures the
|
||||
proofs of @{cite "tej.ea:corrected:1997"}; the code reduces
|
||||
to 8000 loc from 25000 loc. Some illustrative examples of new established rules are:
|
||||
proofs of @{cite "tej.ea:corrected:1997"}; the code reduces to 8000 loc from 25000 loc. \<close>
|
||||
|
||||
Theorem*[th3, short_name="\<open>Examples of Derived Rules.\<close>"]\<open>
|
||||
\<^item> \<open>\<box>x\<in>A\<union>B\<rightarrow>P(x) = (\<box>x\<in>A\<rightarrow>P x) \<box> (\<box>x\<in>B\<rightarrow>P x)\<close>
|
||||
\<^item> \<open>A\<union>B\<subseteq>C \<Longrightarrow> (\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> \<box>x\<in>B\<rightarrow>Q x) = \<box>x\<in>A\<inter>B\<rightarrow>(P x \<lbrakk>C\<rbrakk> Q x)\<close>
|
||||
\<^item> @{cartouche [display]\<open>A\<subseteq>C \<Longrightarrow> B\<inter>C={} \<Longrightarrow>
|
||||
(\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> \<box>x\<in>B\<rightarrow>Q x) = \<box>x\<in>B\<rightarrow>(\<box>x\<in>A\<rightarrow>P x \<lbrakk>C\<rbrakk> Q x)\<close>}
|
||||
\<^item> \<open>finite A \<Longrightarrow> A\<inter>C = {} \<Longrightarrow> ((P \<lbrakk>C\<rbrakk> Q) \ A) = ((P \ A) \<lbrakk>C\<rbrakk> (Q \ A)) ...\<close>
|
||||
\<^item> \<open>finite A \<Longrightarrow> A\<inter>C = {} \<Longrightarrow> ((P \<lbrakk>C\<rbrakk> Q) \ A) = ((P \ A) \<lbrakk>C\<rbrakk> (Q \ A)) ...\<close>\<close>
|
||||
|
||||
The continuity proof of the hiding operator is notorious. The proof is known
|
||||
to involve the classical König's lemma stating that every infinite tree with finite branching
|
||||
has an infinite path. We adapt this lemma to our context as follows:
|
||||
|
||||
@{cartouche [display, indent=5]
|
||||
text\<open>The continuity proof of the hiding operator is notorious. The proof is known to involve the
|
||||
classical König's lemma stating that every infinite tree with finite branching has an infinite path.
|
||||
We adapt this lemma to our context as follows:
|
||||
|
||||
@{cartouche [display, indent=5]
|
||||
\<open>infinite tr \<Longrightarrow> \<forall>i. finite{t. \<exists>t'\<in>tr. t = take i t'}
|
||||
\<Longrightarrow> \<exists> f. strict_mono f \<and> range f \<subseteq> {t. \<exists>t'\<in>tr. t \<le> t'}\<close>}
|
||||
\<Longrightarrow> \<exists> f. strict_mono f \<and> range f \<subseteq> {t. \<exists>t'\<in>tr. t \<le> t'}\<close>}
|
||||
|
||||
in order to come up with the continuity rule: \<open>finite S \<Longrightarrow> cont P \<Longrightarrow> cont(\<lambda>X. P X \ S)\<close>.
|
||||
The original proof had been drastically shortened by a factor 10 and important immediate steps
|
||||
|
@ -476,44 +464,20 @@ under all refinement orderings, while others are not.
|
|||
\<^item> Sequence operator is not monotonic under \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> or \<open>\<sqsubseteq>\<^sub>\<T>\<close>:
|
||||
@{cartouche [display,indent=5]
|
||||
\<open>P \<sqsubseteq>\<^sub>\<FF> P'\<Longrightarrow> Q \<sqsubseteq>\<^sub>\<FF> Q' \<Longrightarrow> (P ; Q) \<sqsubseteq>\<^sub>\<FF> (P' ; Q') where \<FF>\<in>{\<T>\<D>,\<F>\<D>}\<close>}
|
||||
%All refinements are right-side monotonic but \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close> are not left-side monotonic,
|
||||
%which can be explained by
|
||||
%the interdependence relationship of failure and divergence projections for the first component.
|
||||
%We thus proved:
|
||||
|
||||
All refinements are right-side monotonic but \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close> are not left-side monotonic,
|
||||
which can be explained by the interdependence relationship of failure and divergence projections
|
||||
for the first component. We thus proved:
|
||||
\<^item> Hiding operator is not monotonic under \<open>\<sqsubseteq>\<^sub>\<D>\<close>:
|
||||
@{cartouche [display,indent=5] \<open>P \<sqsubseteq>\<^sub>\<FF> Q \<Longrightarrow> P \ A \<sqsubseteq>\<^sub>\<FF> Q \ A where \<FF>\<in>{\<T>,\<F>,\<T>\<D>,\<F>\<D>}\<close>}
|
||||
%Intuitively, for the divergence refinement of the hiding operator, there may be
|
||||
%some trace \<open>s\<in>\<T> Q\<close> and \<open>s\<notin>\<T> P\<close> such that it becomes divergent in \<open>Q \ A\<close> but
|
||||
%not in \<open>P \ A\<close>.
|
||||
%when the condition in the corresponding projection laws is satisfied, which makes it is not monotonic.
|
||||
Intuitively, for the divergence refinement of the hiding operator, there may be
|
||||
some trace \<open>s\<in>\<T> Q\<close> and \<open>s\<notin>\<T> P\<close> such that it becomes divergent in \<open>Q \ A\<close> but
|
||||
not in \<open>P \ A\<close>.
|
||||
\<^item> Parallel composition is not monotonic under \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> or \<open>\<sqsubseteq>\<^sub>\<T>\<close>:
|
||||
@{cartouche [display,indent=5] \<open>P \<sqsubseteq>\<^sub>\<FF> P' \<Longrightarrow> Q \<sqsubseteq>\<^sub>\<FF> Q' \<Longrightarrow> (P \<lbrakk>A\<rbrakk> Q) \<sqsubseteq>\<^sub>\<FF> (P' \<lbrakk>A\<rbrakk> Q') where \<FF>\<in>{\<T>\<D>,\<F>\<D>}\<close>}
|
||||
%The failure and divergence projections of this operator are also interdependent, similar to the
|
||||
%sequence operator.
|
||||
%Hence, this operator is not monotonic with \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close>, but monotonic when their
|
||||
%combinations are considered.
|
||||
|
||||
\<close>
|
||||
|
||||
(* Besides the monotonicity results on the above \<^csp> operators,
|
||||
we have also proved that for other \<^csp> operators, such as multi-prefix and non-deterministic choice,
|
||||
they are all monotonic with these five refinement orderings. Such theoretical results provide significant indicators
|
||||
for semantics choices when considering specification decomposition.
|
||||
We want to emphasize that this is the first work on such substantial
|
||||
analysis in a formal way, as far as we know.
|
||||
|
||||
%In the literature, these processes are defined in a way that does not distinguish the special event \<open>tick\<close>. To be consistent with the idea that ticks should be distinguished on the semantic level, besides the above
|
||||
three processes,
|
||||
|
||||
one can directly prove 3 since for both \<open>CHAOS\<close> and \<open>DF\<close>,
|
||||
the version with \<open>SKIP\<close> is constructed exactly in the same way from that without \<open>SKIP\<close>.
|
||||
And 4 is obtained based on the projection laws of internal choice \<open>\<sqinter>\<close>.
|
||||
Finally, for 5, the difference between \<open>DF\<close> and \<open>RUN\<close> is that the former applies internal choice
|
||||
while the latter with external choice. From the projection laws of both operators,
|
||||
the failure set of \<open>RUN\<close> has more constraints, thus being a subset of that of \<open>DF\<close>,
|
||||
when the divergence set is empty, which is true for both processes.
|
||||
|
||||
*)
|
||||
The failure and divergence projections of this operator are also interdependent, similar to the
|
||||
sequence operator. Hence, this operator is not monotonic with \<open>\<sqsubseteq>\<^sub>\<F>\<close>, \<open>\<sqsubseteq>\<^sub>\<D>\<close> and \<open>\<sqsubseteq>\<^sub>\<T>\<close>, but monotonic
|
||||
when their combinations are considered. \<close>
|
||||
|
||||
subsection*["processes"::tc,main_author="Some(@{docitem ''safouan''}::author)",
|
||||
main_author="Some(@{docitem ''lina''}::author)"]
|
||||
|
@ -549,43 +513,40 @@ Definition*[X6, level="Some 2"]\<open>\<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P
|
|||
|
||||
text\<open>In the following, we denote \<open> \<R>\<P> = {DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P, DF, RUN, CHAOS, CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P}\<close>.
|
||||
All five reference processes are divergence-free.
|
||||
%which was done by using a particular lemma \<open>\<D> (\<mu> x. f x) = \<Inter>\<^sub>i\<^sub>\<in>\<^sub>\<nat> \<D> (f\<^sup>i \<bottom>)\<close>.
|
||||
which was proven by using a particular lemma \<open>\<D> (\<mu> x. f x) = \<Inter>\<^sub>i\<^sub>\<in>\<^sub>\<nat> \<D> (f\<^sup>i \<bottom>)\<close>.
|
||||
@{cartouche
|
||||
[display,indent=8] \<open> D (\<PP> UNIV) = {} where \<PP> \<in> \<R>\<P> and UNIV is the set of all events\<close>
|
||||
}
|
||||
Regarding the failure refinement ordering, the set of failures \<open>\<F> P\<close> for any process \<open>P\<close> is
|
||||
a subset of \<open>\<F> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close>.% and the following lemma was proved:
|
||||
% This proof is performed by induction, based on the failure projection of \<open>STOP\<close> and that of
|
||||
% internal choice.
|
||||
|
||||
a subset of \<open>\<F> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close>.
|
||||
|
||||
@{cartouche [display, indent=25] \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close>}
|
||||
|
||||
|
||||
\<^noindent> Furthermore, the following 5 relationships were demonstrated from monotonicity results and
|
||||
a denotational proof.
|
||||
%among which 1 and 2 are immediate corollaries,
|
||||
%4 and 5 are directly obtained from our monotonicity results while 3 requires a denotational proof.
|
||||
and thanks to transitivity, we can derive other relationships.
|
||||
Furthermore, the following 5 relationships were demonstrated from monotonicity results and
|
||||
a denotational proof.
|
||||
\<close>
|
||||
|
||||
|
||||
Corollary*[co1::"corollary", short_name="\<open>Corollaries on reference processes.\<close>",level="Some 2"]
|
||||
\<open> \<^hfill> \<^br> \<^vs>\<open>-0.3cm\<close>
|
||||
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> CHAOS A\<close>
|
||||
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A\<close>
|
||||
\<^enum> \<open>CHAOS A \<sqsubseteq>\<^sub>\<F> DF A\<close>
|
||||
\<^enum> \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P A \<sqsubseteq>\<^sub>\<F> DF A\<close>
|
||||
\<^enum> \<open>DF A \<sqsubseteq>\<^sub>\<F> RUN A\<close>
|
||||
\<^enum> \<open>DF A \<sqsubseteq>\<^sub>\<F> RUN A\<close> \<^vs>\<open>0.3cm\<close>
|
||||
|
||||
where 1 and 2 are immediate, and where 4 and 5 are directly obtained from our monotonicity
|
||||
results while 3 requires an argument over the denotational space.
|
||||
Thanks to transitivity, we can derive other relationships.\<close>
|
||||
|
||||
Last, regarding trace refinement, for any process P,
|
||||
text\<open> Lastly, regarding trace refinement, for any process P,
|
||||
its set of traces \<open>\<T> P\<close> is a subset of \<open>\<T> (CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close> and of \<open>\<T> (DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV)\<close> as well.
|
||||
%As we already proved that \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> covers all failures,
|
||||
%we can immediately infer that it also covers all traces.
|
||||
%The \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P\<close> case requires a longer denotational proof.
|
||||
|
||||
|
||||
\<^enum> \<open>CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T> P\<close>
|
||||
\<^enum> \<open>DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T> P\<close>
|
||||
|
||||
\<close>
|
||||
|
||||
text\<open>
|
||||
|
@ -598,16 +559,12 @@ verification. For example, if one wants to establish that a protocol implementat
|
|||
a non-deterministic specification \<open>SPEC\<close> it suffices to ask if \<open>IMPL || SPEC\<close> is deadlock-free.
|
||||
In this setting, \<open>SPEC\<close> becomes a kind of observer that signals non-conformance of \<open>IMPL\<close> by
|
||||
deadlock.
|
||||
% A livelocked system looks similar to a deadlocked one from an external point of view.
|
||||
% However, livelock is sometimes considered as worse since the user may be able to observe the internal
|
||||
% activities and so hope that some output will happen eventually.
|
||||
|
||||
In the literature, deadlock and lifelock are phenomena that are often
|
||||
handled separately. One contribution of our work is establish their precise relationship inside
|
||||
the Failure/Divergence Semantics of \<^csp>.\<close>
|
||||
|
||||
(* bizarre: Definition* does not work for this single case *)
|
||||
text*[X10::"definition", level="Some 2"]\<open> \<open>deadlock\<^sub>-free P \<equiv> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close> \<close>
|
||||
Definition*[X10::"definition", level="Some 2"]\<open> \<open>deadlock\<^sub>-free P \<equiv> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F> P\<close> \<close>
|
||||
|
||||
text\<open>\<^noindent> A process \<open>P\<close> is deadlock-free if and only if after any trace \<open>s\<close> without \<open>\<surd>\<close>, the union of \<open>\<surd>\<close>
|
||||
and all events of \<open>P\<close> can never be a refusal set associated to \<open>s\<close>, which means that \<open>P\<close> cannot
|
||||
|
@ -615,15 +572,14 @@ be deadlocked after any non-terminating trace.
|
|||
\<close>
|
||||
|
||||
Theorem*[T1, short_name="\<open>DF definition captures deadlock-freeness\<close>", level="Some 2"]
|
||||
\<open> \hfill \break \<open>deadlock_free P \<longleftrightarrow> (\<forall>s\<in>\<T> P. tickFree s \<longrightarrow> (s, {\<surd>}\<union>events_of P) \<notin> \<F> P)\<close> \<close>
|
||||
\<open> \<^hfill> \<^br> \<open>deadlock_free P \<longleftrightarrow> (\<forall>s\<in>\<T> P. tickFree s \<longrightarrow> (s, {\<surd>}\<union>events_of P) \<notin> \<F> P)\<close> \<close>
|
||||
Definition*[X11, level="Some 2"]\<open> \<open>livelock\<^sub>-free P \<equiv> \<D> P = {} \<close> \<close>
|
||||
|
||||
text\<open> Recall that all five reference processes are livelock-free.
|
||||
We also have the following lemmas about the
|
||||
livelock-freeness of processes:
|
||||
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> \<PP> UNIV \<sqsubseteq>\<^sub>\<D> P where \<PP> \<in> \<R>\<P>\<close>
|
||||
\<^enum> @{cartouche [display]\<open>livelock\<^sub>-free P \<longleftrightarrow> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P
|
||||
\<longleftrightarrow> CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P\<close>}
|
||||
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> DF\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P \<longleftrightarrow> CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<T>\<^sub>\<D> P\<close>
|
||||
\<^enum> \<open>livelock\<^sub>-free P \<longleftrightarrow> CHAOS\<^sub>S\<^sub>K\<^sub>I\<^sub>P UNIV \<sqsubseteq>\<^sub>\<F>\<^sub>\<D> P\<close>
|
||||
\<close>
|
||||
text\<open>
|
||||
|
@ -742,8 +698,7 @@ in the HOLCF library), which are also relevant for our final Dining Philophers e
|
|||
These are essentially adaptions of k-induction schemes applied to domain-theoretic
|
||||
setting (so: requiring \<open>f\<close> continuous and \<open>P\<close> admissible; these preconditions are
|
||||
skipped here):
|
||||
\<^item> @{cartouche [display]\<open>... \<Longrightarrow> \<forall>i<k. P (f\<^sup>i \<bottom>) \<Longrightarrow> (\<forall>X. (\<forall>i<k. P (f\<^sup>i X)) \<longrightarrow> P (f\<^sup>k X))
|
||||
\<Longrightarrow> P (\<mu>X. f X)\<close>}
|
||||
\<^item> \<open>... \<Longrightarrow> \<forall>i<k. P (f\<^sup>i \<bottom>) \<Longrightarrow> (\<forall>X. (\<forall>i<k. P (f\<^sup>i X)) \<longrightarrow> P (f\<^sup>k X)) \<Longrightarrow> P (\<mu>X. f X)\<close>
|
||||
\<^item> \<open>... \<Longrightarrow> \<forall>i<k. P (f\<^sup>i \<bottom>) \<Longrightarrow> (\<forall>X. P X \<longrightarrow> P (f\<^sup>k X)) \<Longrightarrow> P (\<mu>X. f X)\<close>
|
||||
|
||||
|
||||
|
@ -753,8 +708,7 @@ it reduces the goal size.
|
|||
Another problem occasionally occurring in refinement proofs happens when the right side term
|
||||
involves more than one fixed-point process (\<^eg> \<open>P \<lbrakk>{A}\<rbrakk> Q \<sqsubseteq> S\<close>). In this situation,
|
||||
we need parallel fixed-point inductions. The HOLCF library offers only a basic one:
|
||||
\<^item> @{cartouche [display]\<open>... \<Longrightarrow> P \<bottom> \<bottom> \<Longrightarrow> (\<forall>X Y. P X Y \<Longrightarrow> P (f X) (g Y))
|
||||
\<Longrightarrow> P (\<mu>X. f X) (\<mu>X. g X)\<close>}
|
||||
\<^item> \<open>... \<Longrightarrow> P \<bottom> \<bottom> \<Longrightarrow> (\<forall>X Y. P X Y \<Longrightarrow> P (f X) (g Y)) \<Longrightarrow> P (\<mu>X. f X) (\<mu>X. g X)\<close>
|
||||
|
||||
|
||||
\<^noindent> This form does not help in cases like in \<open>P \<lbrakk>\<emptyset>\<rbrakk> Q \<sqsubseteq> S\<close> with the interleaving operator on the
|
||||
|
@ -1026,10 +980,6 @@ over finite sub-systems with globally infinite systems in a logically safe way.
|
|||
subsection*[bib::bibliography]\<open>References\<close>
|
||||
|
||||
close_monitor*[this]
|
||||
(*
|
||||
term\<open>\<longrightarrow>\<close>
|
||||
term\<open> demon \<sigma>\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l := \<Sqinter> \<Delta>t \<in> \<real>\<^sub>>\<^sub>0. ||| i\<in>A. ACTOR i \<sigma>\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l
|
||||
\<lbrakk>S\<rbrakk> sync!\<sigma>\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>' \<longrightarrow> demon \<sigma>\<^sub>g\<^sub>l\<^sub>o\<^sub>b\<^sub>a\<^sub>l\<^sub>' \<close>
|
||||
*)
|
||||
|
||||
end
|
||||
(*>*)
|
Before Width: | Height: | Size: 27 KiB After Width: | Height: | Size: 27 KiB |
Before Width: | Height: | Size: 407 KiB After Width: | Height: | Size: 407 KiB |
Before Width: | Height: | Size: 23 KiB After Width: | Height: | Size: 23 KiB |
|
@ -1,4 +1,3 @@
|
|||
scholarly_paper
|
||||
technical_report
|
||||
CENELEC_50128
|
||||
cytology
|
Before Width: | Height: | Size: 162 KiB After Width: | Height: | Size: 162 KiB |
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 13 KiB |
Before Width: | Height: | Size: 91 KiB After Width: | Height: | Size: 91 KiB |
Before Width: | Height: | Size: 31 KiB After Width: | Height: | Size: 31 KiB |
|
@ -20,6 +20,9 @@ imports
|
|||
"Isabelle_DOF.technical_report"
|
||||
|
||||
begin
|
||||
|
||||
define_ontology "DOF-CC_terminology.sty" "CC"
|
||||
|
||||
(*>>*)
|
||||
text\<open>We re-use the class @\<open>typ math_content\<close>, which provides also a framework for
|
||||
semi-formal terminology, which we re-use by this definition.\<close>
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
%% Copyright (C) University of Exeter
|
||||
%% University of Paris-Saclay
|
||||
%%
|
||||
%% License:
|
||||
%% This program can be redistributed and/or modified under the terms
|
||||
%% of the LaTeX Project Public License Distributed from CTAN
|
||||
%% archives in directory macros/latex/base/lppl.txt; either
|
||||
%% version 1.3c of the License, or (at your option) any later version.
|
||||
%% OR
|
||||
%% The 2-clause BSD-style license.
|
||||
%%
|
||||
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
|
||||
|
||||
\NeedsTeXFormat{LaTeX2e}\relax
|
||||
\ProvidesPackage{DOF-CC_terminology}
|
||||
[00/00/0000 Document-Type Support Framework for Isabelle (CC).]
|
||||
|
||||
\RequirePackage{DOF-COL}
|
||||
\usepackage{etex}
|
||||
\ifdef{\reserveinserts}{\reserveinserts{28}}{}
|
||||
|
||||
|
||||
|
||||
|
||||
\newkeycommand*{\mathcc}[label=,type=%
|
||||
, scholarly_paper.math_content.short_name ={}%
|
||||
, scholarly_paper.math_content.mcc = %
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.technical.status =%
|
||||
, CC_terminology.concept_definition.tag=%
|
||||
, CC_terminology.concept_definition.short_tag=%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{%
|
||||
\begin{\commandkey{scholarly_paper.math_content.mcc}}\label{\commandkey{label}}
|
||||
#1
|
||||
\end{\commandkey{scholarly_paper.math_content.mcc}}
|
||||
}{%
|
||||
\begin{\commandkey{scholarly_paper.math_content.mcc}}[\commandkey{scholarly_paper.math_content.short_name}]\label{\commandkey{label}}
|
||||
#1
|
||||
\end{\commandkey{scholarly_paper.math_content.mcc}}
|
||||
}
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
\expandafter\def\csname isaDof.text.scholarly_paper.math_content\endcsname{\mathcc}
|
||||
|
||||
|
|
@ -3,10 +3,12 @@ theory "document_setup"
|
|||
imports
|
||||
"Isabelle_DOF.technical_report"
|
||||
"Isabelle_DOF-Ontologies.CENELEC_50128"
|
||||
"Isabelle_DOF-Ontologies.CC_terminology"
|
||||
begin
|
||||
|
||||
use_template "scrreprt-modern"
|
||||
use_ontology "Isabelle_DOF.technical_report" and "Isabelle_DOF-Ontologies.CENELEC_50128"
|
||||
and "Isabelle_DOF-Ontologies.CC_terminology"
|
||||
|
||||
(*>*)
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@ no_notation "Isabelle_DOF_trace_attribute" ("@{trace-attribute _}")
|
|||
|
||||
consts Isabelle_DOF_typ :: "string \<Rightarrow> typ" ("@{typ _}")
|
||||
consts Isabelle_DOF_term :: "string \<Rightarrow> term" ("@{term _}")
|
||||
consts Isabelle_DOF_term_repr :: "string \<Rightarrow> term" ("@{termrepr _}")
|
||||
datatype "thm" = Isabelle_DOF_thm string ("@{thm _}") | Thm_content ("proof":proofterm)
|
||||
datatype "thms_of" = Isabelle_DOF_thms_of string ("@{thms-of _}")
|
||||
datatype "file" = Isabelle_DOF_file string ("@{file _}")
|
||||
|
@ -269,13 +268,6 @@ case term_option of
|
|||
val traces' = map conv (HOLogic.dest_list traces)
|
||||
in HOLogic.mk_list \<^Type>\<open>prod \<^typ>\<open>string\<close> \<^typ>\<open>string\<close>\<close> traces' end
|
||||
|
||||
(* utilities *)
|
||||
|
||||
fun property_list_dest ctxt X =
|
||||
map (fn \<^Const_>\<open>Isabelle_DOF_term for s\<close> => HOLogic.dest_string s
|
||||
|\<^Const_>\<open>Isabelle_DOF_term_repr for s\<close> => holstring_to_bstring ctxt (HOLogic.dest_string s))
|
||||
(HOLogic.dest_list X)
|
||||
|
||||
end; (* struct *)
|
||||
|
||||
\<close>
|
||||
|
@ -314,7 +306,6 @@ end)
|
|||
#>
|
||||
([(\<^const_name>\<open>Isabelle_DOF_typ\<close>, ISA_core.ML_isa_check_typ, ISA_core.ML_isa_elaborate_typ)
|
||||
,(\<^const_name>\<open>Isabelle_DOF_term\<close>, ISA_core.ML_isa_check_term, ISA_core.ML_isa_elaborate_term)
|
||||
,(\<^const_name>\<open>Isabelle_DOF_term_repr\<close>, ISA_core.check_identity, ISA_core.ML_isa_elaborate_generic)
|
||||
,(\<^const_name>\<open>Isabelle_DOF_docitem\<close>,
|
||||
ISA_core.ML_isa_check_docitem, ISA_core.ML_isa_elaborate_generic)
|
||||
,(\<^const_name>\<open>Isabelle_DOF_trace_attribute\<close>,
|
||||
|
|
|
@ -11,72 +11,187 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
chapter\<open>Testing Freeform and Formal Elements from the scholarly-paper Ontology\<close>
|
||||
|
||||
theory
|
||||
AssnsLemmaThmEtc
|
||||
imports
|
||||
"Isabelle_DOF-Ontologies.Conceptual"
|
||||
"Isabelle_DOF.scholarly_paper"
|
||||
"Isabelle_DOF-Unit-Tests_document"
|
||||
TestKit
|
||||
begin
|
||||
|
||||
section\<open>Elementary Creation of Doc-items and Access of their Attibutes\<close>
|
||||
section\<open>Test Objective\<close>
|
||||
|
||||
text\<open>Testing Core Elements for \<^theory>\<open>Isabelle_DOF.scholarly_paper\<close> wrt. to
|
||||
existance, controlability via implicit and explicit default classes, and potential
|
||||
LaTeX Layout.\<close>
|
||||
|
||||
text\<open>Current status:\<close>
|
||||
print_doc_classes
|
||||
print_doc_items
|
||||
|
||||
|
||||
section\<open>An Example for use-before-declaration of Formal Content\<close>
|
||||
|
||||
section\<open>Definitions, Lemmas, Theorems, Assertions\<close>
|
||||
|
||||
term\<open>True\<close>
|
||||
text*[aa::F, properties = "[@{term ''True''}]"]
|
||||
\<open>Our definition of the HOL-Logic has the following properties:\<close>
|
||||
assert*\<open>F.properties @{F \<open>aa\<close>} = [@{term ''True''}]\<close>
|
||||
|
||||
text\<open>For now, as the term annotation is not bound to a meta logic which will translate
|
||||
\<^term>\<open>[@{term ''True''}]\<close> to \<^term>\<open>[True]\<close>, we can not use the HOL \<^const>\<open>True\<close> constant
|
||||
in the assertion.
|
||||
in the assertion.\<close>
|
||||
|
||||
ML\<open> @{term_ "[@{term \<open>True \<longrightarrow> True \<close>}]"}; (* with isa-check *) \<close>
|
||||
|
||||
ML\<open>
|
||||
(* Checking the default classes which should be in a neutral(unset) state. *)
|
||||
(* Note that in this state, the "implicit default" is "math_content". *)
|
||||
@{assert} (Config.get_global @{theory} Definition_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Lemma_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Theorem_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Proposition_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Premise_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Corollary_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Consequence_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Assumption_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Hypothesis_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Consequence_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Assertion_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Proof_default_class = "");
|
||||
@{assert} (Config.get_global @{theory} Example_default_class = "");
|
||||
\<close>
|
||||
|
||||
(* does not work in batch mode ...
|
||||
(* sample for accessing a property filled with previous assert's of "aa" *)
|
||||
ML\<open> ISA_core.property_list_dest @{context} @{docitem_attribute property :: aa};\<close>
|
||||
|
||||
Definition*[e1]\<open>Lorem ipsum dolor sit amet, ... \<close>
|
||||
text\<open>Note that this should yield a warning since \<^theory_text>\<open>Definition*\<close> uses as "implicit default" the class
|
||||
\<^doc_class>\<open>math_content\<close> which has no \<^term>\<open>text_element.level\<close> set, however in this context,
|
||||
it is required to be a positive number since it is \<^term>\<open>text_element.referentiable\<close> .
|
||||
This is intended behaviour in order to give the user a nudge to be more specific.\<close>
|
||||
|
||||
text\<open>A repair looks like this:\<close>
|
||||
declare [[Definition_default_class = "definition"]]
|
||||
|
||||
text\<open>Now, define a forward reference to the formal content: \<close>
|
||||
|
||||
declare_reference*[e1bisbis::"definition"]
|
||||
|
||||
text\<open>... which makes it possible to refer in a freeform definition to its formal counterpart
|
||||
which will appear textually later. With this pragmatics, an "out-of- order-presentation"
|
||||
can be achieved within \<^theory>\<open>Isabelle_DOF.scholarly_paper\<close> for the most common cases.\<close>
|
||||
|
||||
|
||||
|
||||
assert*[aa::F] " X \<and> Y \<longrightarrow> True" (*example with uni-code *)
|
||||
ML\<open> ISA_core.property_list_dest @{context} @{docitem_attribute property :: aa};
|
||||
app writeln (tl (rev it));\<close>
|
||||
Definition*[e1bis::"definition", short_name="\<open>Nice lemma.\<close>"]
|
||||
\<open>Lorem ipsum dolor sit amet, ...
|
||||
This is formally defined as follows in @{definition (unchecked) "e1bisbis"}\<close>
|
||||
definition*[e1bisbis, status=formal] e :: int where "e = 2"
|
||||
|
||||
section\<open>Tests for Theorems, Assertions, Assumptions, Hypothesis, etc.\<close>
|
||||
|
||||
declare [[Theorem_default_class = "theorem",
|
||||
Premise_default_class = "premise",
|
||||
Hypothesis_default_class = "hypothesis",
|
||||
Assumption_default_class = "assumption",
|
||||
Conclusion_default_class = "conclusion",
|
||||
Consequence_default_class = "consequence",
|
||||
Assertion_default_class = "assertion",
|
||||
Corollary_default_class = "corollary",
|
||||
Proof_default_class = "math_proof",
|
||||
Conclusion_default_class = "conclusion_stmt"]]
|
||||
|
||||
assert*[aa::F] "\<forall>x::bool. X \<and> Y \<longrightarrow> True" (*problem with uni-code *)
|
||||
*)
|
||||
ML\<open>
|
||||
Syntax.read_term_global @{theory} "[@{termrepr ''True @<longrightarrow> True''}]";
|
||||
(* this only works because the isa check is not activated in "term" !!! *)
|
||||
@{term "[@{term '' True @<longrightarrow> True ''}]"}; (* with isa-check *)
|
||||
@{term "[@{termrepr '' True @<longrightarrow> True ''}]"}; (* without isa check *)
|
||||
\<close>
|
||||
(*
|
||||
ML\<open>val [_,_,Const _ $ s,_] = (HOLogic.dest_list @{docitem_attribute property :: aa});
|
||||
val t = HOLogic.dest_string s;
|
||||
holstring_to_bstring @{context} t
|
||||
\<close>
|
||||
*)
|
||||
lemma "All (\<lambda>x. X \<and> Y \<longrightarrow> True)" oops
|
||||
Theorem*[e2]\<open>... suspendisse non arcu malesuada mollis, nibh morbi, ... \<close>
|
||||
|
||||
theorem*[e2bis::"theorem", status=formal] f : "e = 1+1" unfolding e_def by simp
|
||||
|
||||
text\<open>An example for the ontology specification character of the short-cuts such as
|
||||
@{command "assert*"}: in the following, we use the same notation referring to a completely
|
||||
different class. "F" and "assertion" have only in common that they posses the attribute
|
||||
\<^verbatim>\<open>property\<close>: \<close>
|
||||
(*<*) (* @{theorem "e2bis"} breaks LaTeX generation ... *)
|
||||
Lemma*[e3,level="Some 2"]
|
||||
\<open>... phasellus amet id massa nunc, pede suscipit repellendus, ... @{theorem "e2bis"} \<close>
|
||||
(*>*)
|
||||
Proof*[d10, short_name="\<open>Induction over Tinea pedis.\<close>"]\<open>Freeform Proof\<close>
|
||||
|
||||
text\<open>Creation just like that: \<close>
|
||||
lemma*[dfgd::"lemma"] q: "All (\<lambda>x. X \<and> Y \<longrightarrow> True)" oops
|
||||
text-assert-error\<open>@{lemma dfgd} \<close>\<open>Undefined instance:\<close> \<comment> \<open>oops‘ed objects are not referentiable.\<close>
|
||||
|
||||
text\<open>... in ut tortor eleifend augue pretium consectetuer...
|
||||
Lectus accumsan velit ultrices, ...\<close>
|
||||
|
||||
Proposition*[d2::"proposition"]\<open>"Freeform Proposition"\<close>
|
||||
|
||||
Assumption*[d3] \<open>"Freeform Assertion"\<close>
|
||||
|
||||
Premise*[d4]\<open>"Freeform Premise"\<close>
|
||||
|
||||
Corollary*[d5]\<open>"Freeform Corollary"\<close>
|
||||
|
||||
Consequence*[d6::scholarly_paper.consequence]\<open>"Freeform Consequence"\<close> \<comment> \<open>longname just for test\<close>
|
||||
|
||||
declare_reference*[ababa::scholarly_paper.assertion]
|
||||
Assertion*[d7]\<open>Freeform Assumption with forward reference to the formal
|
||||
@{assertion (unchecked) ababa}.\<close>
|
||||
assert*[ababa::assertion] "3 < (4::int)"
|
||||
assert*[ababab::assertion] "0 < (4::int)"
|
||||
|
||||
|
||||
Conclusion*[d8]\<open>"Freeform Conclusion"\<close>
|
||||
|
||||
Hypothesis*[d9]\<open>"Freeform Hypothesis"\<close>
|
||||
|
||||
Example*[d11::math_example]\<open>"Freeform Example"\<close>
|
||||
|
||||
text\<open>An example for the ontology specification character of the short-cuts such as
|
||||
@{command "assert*"}: in the following, we use the same notation referring to a completely
|
||||
different class. "F" and "assertion" have only in common that they posses the attribute
|
||||
@{const [names_short] \<open>properties\<close>}: \<close>
|
||||
|
||||
section\<open>Exhaustive Scholarly\_paper Test\<close>
|
||||
|
||||
subsection\<open>Global Structural Elements\<close>
|
||||
(* maybe it is neither necessary nor possible to test these here... title is unique in
|
||||
a document, for example. To be commented out of needed. *)
|
||||
text*[tt1::scholarly_paper.title]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt2::scholarly_paper.author]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt3::scholarly_paper.article]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt4::scholarly_paper.annex]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt5::scholarly_paper.abstract]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt6::scholarly_paper.subtitle]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt7::scholarly_paper.bibliography]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt8::scholarly_paper.introduction]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt9::scholarly_paper.related_work]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt11::scholarly_paper.text_section]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt12::scholarly_paper.background ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt13::scholarly_paper.conclusion ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
|
||||
subsection\<open>Technical Content Specific Elements\<close>
|
||||
|
||||
text*[tu1::scholarly_paper.axiom ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu1bis::scholarly_paper.math_content, mcc="axm" ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu2::scholarly_paper.lemma ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu3::scholarly_paper.example ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu4::scholarly_paper.premise ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu5::scholarly_paper.theorem ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu6::scholarly_paper.assertion]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu7::scholarly_paper.corollary]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu9::scholarly_paper.technical]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu10::scholarly_paper.assumption ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu13::scholarly_paper.definition ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu15::scholarly_paper.experiment ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu16::scholarly_paper.hypothesis ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu17::scholarly_paper.math_proof ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu18::scholarly_paper.consequence]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu26::scholarly_paper.math_explanation]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu19::scholarly_paper.math_formal]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu20::scholarly_paper.proposition]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu21::scholarly_paper.math_content ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu22::scholarly_paper.math_example ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu23::scholarly_paper.conclusion_stmt ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu24::scholarly_paper.math_motivation ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu25::scholarly_paper.tech_definition ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu28::scholarly_paper.eng_example ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tt10::scholarly_paper.tech_example]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu8::scholarly_paper.tech_code] \<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu27::scholarly_paper.engineering_content]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
text*[tu14::scholarly_paper.evaluation ]\<open>Lectus accumsan velit ultrices, ...\<close>
|
||||
|
||||
end
|
||||
(*>*)
|
||||
|
|
|
@ -128,10 +128,10 @@ DOF_core.get_attribute_info "Conceptual.C" "z" @{theory};
|
|||
|
||||
|
||||
ML\<open>
|
||||
DOF_core.get_value_local "sdf" @{context};
|
||||
DOF_core.get_value_local "sdfg" @{context};
|
||||
DOF_core.get_value_local "dfgdfg" @{context};
|
||||
DOF_core.get_value_local "omega" @{context};
|
||||
DOF_core.value_of "sdf" \<^theory>;
|
||||
DOF_core.value_of "sdfg" \<^theory>;
|
||||
DOF_core.value_of "dfgdfg" \<^theory>;
|
||||
DOF_core.value_of "omega" \<^theory>;
|
||||
\<close>
|
||||
|
||||
text\<open>A not too trivial test: default y -> [].
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
|
||||
chapter\<open>Testing hand-programmed (low-level) Invariants\<close>
|
||||
|
||||
theory Concept_Example_Low_Level_Invariant
|
||||
theory Concept_Example_Low_Level_Invariant
|
||||
imports
|
||||
"Isabelle_DOF-Unit-Tests_document"
|
||||
"Isabelle_DOF-Ontologies.Conceptual" (* we use the generic "Conceptual" ontology *)
|
||||
|
@ -122,7 +122,7 @@ in DOF_core.add_ml_invariant binding (DOF_core.make_ml_invariant (check_M_invari
|
|||
section\<open>Example: Monitor Class Invariant\<close>
|
||||
|
||||
open_monitor*[struct::M]
|
||||
|
||||
|
||||
subsection*[a::A, x = "3"] \<open> Lorem ipsum dolor sit amet, ... \<close>
|
||||
|
||||
text*[c1::C, x = "''beta''"] \<open> ... suspendisse non arcu malesuada mollis, nibh morbi, ... \<close>
|
||||
|
|
|
@ -77,8 +77,10 @@ text-assert-error[test_monitor_A1::test_monitor_A]\<open>\<close>
|
|||
\<open>accepts clause 1 of monitor Concept_MonitorTest1.test_monitor_M rejected\<close>
|
||||
declare[[strict_monitor_checking=false]]
|
||||
text*[test_monitor_A1::test_monitor_A]\<open>\<close> \<comment> \<open>the same in non-strict monitor checking.\<close>
|
||||
|
||||
text*[testFree2::test_monitor_free]\<open>\<close>
|
||||
declare[[free_class_in_monitor_strict_checking]]
|
||||
text-assert-error[testFree2::test_monitor_free]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest1.test_monitor_M not enabled\<close>
|
||||
declare[[free_class_in_monitor_strict_checking=false]]
|
||||
text*[test_monitor_head1::test_monitor_head]\<open>\<close>
|
||||
text*[testFree3::test_monitor_free]\<open>\<close>
|
||||
text*[test_monitor_B1::test_monitor_B]\<open>\<close>
|
||||
|
@ -107,5 +109,19 @@ value*\<open>map fst @{trace-attribute \<open>test_monitor_M\<close>}\<close>
|
|||
|
||||
ML\<open>@{assert} ([("Conceptual.A", "test"), ("Conceptual.F", "sdfg")] = @{trace_attribute aaa}) \<close>
|
||||
|
||||
|
||||
open_monitor*[test_monitor_M3::monitor_M]
|
||||
|
||||
declare[[strict_monitor_checking]]
|
||||
text-assert-error[test_monitor_A2::test_monitor_A]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest1.test_monitor_M3 rejected\<close>
|
||||
declare[[strict_monitor_checking=false]]
|
||||
text*[test_monitor_A3::test_monitor_A]\<open>\<close> \<comment> \<open>the same in non-strict monitor checking.\<close>
|
||||
declare[[free_class_in_monitor_strict_checking]]
|
||||
text-assert-error[testFree7::test_monitor_free]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest1.test_monitor_M3 not enabled\<close>
|
||||
declare[[free_class_in_monitor_strict_checking=false]]
|
||||
|
||||
|
||||
end
|
||||
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
* This program can be redistributed and/or modified under the terms
|
||||
* of the 2-clause BSD-style license.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
chapter\<open>Testing Nested Monitors\<close>
|
||||
|
||||
theory
|
||||
Concept_MonitorTest2
|
||||
imports
|
||||
Concept_MonitorTest1
|
||||
begin
|
||||
|
||||
section\<open>Test Purpose.\<close>
|
||||
text\<open> Creation of document parts that are controlled by (nested, locally defined) monitors. \<close>
|
||||
|
||||
doc_class monitor_M =
|
||||
tmM :: int
|
||||
rejects "test_monitor_B"
|
||||
accepts "test_monitor_E ~~ test_monitor_C"
|
||||
|
||||
doc_class test_monitor_head =
|
||||
tmhd :: int
|
||||
|
||||
declare[[free_class_in_monitor_checking]]
|
||||
|
||||
declare[[free_class_in_monitor_strict_checking]]
|
||||
text-assert-error[test_monitor_head1::test_monitor_head]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest1.test_monitor_M3 not enabled\<close>
|
||||
declare[[free_class_in_monitor_strict_checking=false]]
|
||||
text*[test_monitor_head2::Concept_MonitorTest1.test_monitor_head]\<open>\<close>
|
||||
|
||||
open_monitor*[test_monitor_M3::monitor_M]
|
||||
|
||||
text*[test_monitor_head3::Concept_MonitorTest1.test_monitor_head]\<open>\<close>
|
||||
text*[testFree3::test_monitor_free]\<open>\<close>
|
||||
declare[[strict_monitor_checking]]
|
||||
text-assert-error[test_monitor_B1::test_monitor_B]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest2.test_monitor_M3 rejected\<close>
|
||||
declare[[strict_monitor_checking=false]]
|
||||
text*[testFree4::test_monitor_free]\<open>\<close>
|
||||
declare[[strict_monitor_checking]]
|
||||
text-assert-error[test_monitor_D1::test_monitor_D]\<open>\<close>
|
||||
\<open>accepts clause 1 of monitor Concept_MonitorTest2.test_monitor_M3 rejected\<close>
|
||||
declare[[strict_monitor_checking=false]]
|
||||
text*[testFree5::test_monitor_free]\<open>\<close>
|
||||
text*[test_monitor_E1::test_monitor_E]\<open>\<close>
|
||||
text*[test_monitor_C1::test_monitor_C]\<open>\<close>
|
||||
text*[testFree6::test_monitor_free]\<open>\<close>
|
||||
|
||||
close_monitor*[Concept_MonitorTest1.test_monitor_M3]
|
||||
|
||||
close_monitor*[test_monitor_M3]
|
||||
|
||||
declare[[free_class_in_monitor_checking = false]]
|
||||
|
||||
end
|
|
@ -94,6 +94,17 @@ definition*[a2::A, x=5, level="Some 1"] xx' where "xx' \<equiv> A.x @{A \<open>a
|
|||
|
||||
lemma*[e5::E] testtest : "xx + A.x @{A \<open>a1\<close>} = yy + A.x @{A \<open>a1\<close>} \<Longrightarrow> xx = yy" by simp
|
||||
|
||||
doc_class cc_assumption_test =
|
||||
a :: int
|
||||
text*[cc_assumption_test_ref::cc_assumption_test]\<open>\<close>
|
||||
|
||||
definition tag_l :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" where "tag_l \<equiv> \<lambda>x y. y"
|
||||
|
||||
lemma* tagged : "tag_l @{cc-assumption-test \<open>cc_assumption_test_ref\<close>} AA \<Longrightarrow> AA"
|
||||
by (simp add: tag_l_def)
|
||||
|
||||
find_theorems name:tagged "(_::cc_assumption_test \<Rightarrow> _ \<Rightarrow> _) _ _ \<Longrightarrow>_"
|
||||
|
||||
declare_reference-assert-error[c1::C]\<open>Duplicate instance declaration\<close> \<comment> \<open>forward declaration\<close>
|
||||
|
||||
declare_reference*[e6::E]
|
||||
|
|
|
@ -1,30 +1,47 @@
|
|||
chapter\<open>Evaluation\<close>
|
||||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
* This program can be redistributed and/or modified under the terms
|
||||
* of the 2-clause BSD-style license.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
text\<open>Term Annotation Antiquotations (TA) can be evaluated with the help of the value* command.\<close>
|
||||
chapter\<open>Term-Antiquotation Expansions and Evaluation\<close>
|
||||
|
||||
theory
|
||||
Evaluation
|
||||
Concept_TermEvaluation
|
||||
imports
|
||||
"Isabelle_DOF-Unit-Tests.Concept_TermAntiquotations"
|
||||
"Isabelle_DOF-Unit-Tests.Concept_High_Level_Invariants"
|
||||
TestKit
|
||||
begin
|
||||
begin
|
||||
|
||||
section\<open>Test Purpose.\<close>
|
||||
text\<open> Creation of ontological instances along the \<^theory>\<open>Isabelle_DOF-Ontologies.Conceptual\<close>
|
||||
Ontology. Emphasis is put on type-safe (ontologically consistent) referencing of text, code and
|
||||
proof elements. Some tests cover also the critical cases concerning name spaces of oid's. \<close>
|
||||
|
||||
|
||||
(*
|
||||
section\<open>\<^theory_text>\<open>ML*\<close>-Annotated SML-commands\<close>
|
||||
|
||||
ML*[thefunction::B,x=\<open>\<open>dfg\<close>\<close>]\<open>fun fac x = if x = 0 then 1 else x * fac(x-1);
|
||||
val t = \<^value_>\<open>x @{B \<open>thefunction\<close>}\<close>\<close>
|
||||
ML\<open>fac 5; t\<close> \<comment> \<open>this is a test that ML* is actually evaluated and the
|
||||
resulting toplevel state is preserved.\<close>
|
||||
text-macro*[the::C]\<open> @{B [display] "thefunction"} \<close>
|
||||
text*[the::C]\<open> @{B "thefunction"} \<close>
|
||||
|
||||
text\<open>... and here we reference @{B [display] \<open>thefunction\<close>}.\<close>
|
||||
text\<open>... and here we reference @{B \<open>thefunction\<close>}.\<close>
|
||||
|
||||
*)
|
||||
|
||||
section\<open>\<^theory_text>\<open>value*\<close>-Annotated evaluation-commands\<close>
|
||||
|
||||
section\<open>Term-Annotation and its Evaluation\<close>
|
||||
|
||||
text\<open>Term Annotation Antiquotations (TA) can be evaluated with the help of the value* command.\<close>
|
||||
|
||||
text\<open>The value* command uses the same code as the value command
|
||||
and adds the possibility to evaluate Term Annotation Antiquotations (TA).
|
||||
|
@ -36,13 +53,6 @@ Some built-ins remain as unspecified constants:
|
|||
\<^item> the docitem TA offers a way to check the reference of class instances
|
||||
without checking the instances type.
|
||||
It must be avoided for certification
|
||||
\<^item> the termrepr TA is left as unspecified constant for now.
|
||||
A major refactoring of code should be done to enable
|
||||
referential equivalence for termrepr, by changing the dependency
|
||||
between the Isa-DOF theory and the Assert theory.
|
||||
The assert-cmd function in Assert should use the value* command
|
||||
functions, which make the elaboration of the term
|
||||
referenced by the TA before passing it to the evaluator
|
||||
|
||||
We also have the possibility to make some requests on classes instances, i.e. on docitems
|
||||
by specifying the doc class.
|
||||
|
@ -79,14 +89,11 @@ an instance of the class @{doc_class A}:
|
|||
\<close>
|
||||
term*\<open>@{A \<open>xcv1\<close>}\<close>
|
||||
|
||||
text\<open>The instance class @{docitem \<open>xcv1\<close>} is not an instance of the class @{doc_class B}:
|
||||
\<close>
|
||||
(* Error:
|
||||
term*\<open>@{B \<open>xcv1\<close>}\<close>
|
||||
*)
|
||||
text\<open>The instance class @{docitem \<open>xcv1\<close>} is not an instance of the class @{doc_class B}:\<close>
|
||||
value-assert-error\<open>@{B \<open>xcv1\<close>}\<close>\<open>xcv1 is not an instance of Conceptual.B\<close>
|
||||
|
||||
text\<open>We can evaluate the instance class. The current implementation returns
|
||||
the value of the instance, i.e. a collection of every attribute of the instance:
|
||||
\<close>
|
||||
the value of the instance, i.e. a collection of every attribute of the instance: \<close>
|
||||
value*\<open>@{A \<open>xcv1\<close>}\<close>
|
||||
|
||||
text\<open>We can also get the value of an attribute of the instance:\<close>
|
||||
|
@ -112,24 +119,20 @@ text\<open>We can also evaluate the instance @{docitem \<open>xcv4\<close>}.
|
|||
The attribute \<open>b\<close> of the instance @{docitem \<open>xcv4\<close>} is of type @{typ "(A \<times> C) set"}
|
||||
but the instance @{docitem \<open>xcv4\<close>} initializes the attribute by using the \<open>docitem\<close> TA.
|
||||
Then the instance can be evaluate but only the references of the classes of the set
|
||||
used in the \<open>b\<close> attribute will be checked, and the type of these classes will not:
|
||||
\<close>
|
||||
used in the \<open>b\<close> attribute will be checked, and the type of these classes will not:\<close>
|
||||
value* \<open>@{F \<open>xcv4\<close>}\<close>
|
||||
|
||||
(*
|
||||
|
||||
text\<open>If we want the classes to be checked,
|
||||
we can use the TA which will also check the type of the instances.
|
||||
The instance @{A \<open>xcv3\<close>} is of type @{typ "A"} and the instance @{C \<open>xcv2\<close>} is of type @{typ "C"}:
|
||||
\<close>
|
||||
The instance @{A \<open>xcv3\<close>} is of type @{typ "A"} and the instance @{C \<open>xcv2\<close>} is of type @{typ "C"}:\<close>
|
||||
update_instance*[xcv4::F, b+="{(@{A ''xcv3''},@{C ''xcv2''})}"]
|
||||
*)
|
||||
|
||||
text\<open>Using a TA in terms is possible, and the term is evaluated:\<close>
|
||||
value*\<open>[@{thm \<open>HOL.refl\<close>}, @{thm \<open>HOL.refl\<close>}]\<close>
|
||||
value*\<open>@{thm ''HOL.refl''} = @{thm (''HOL.refl'')}\<close>
|
||||
|
||||
ML\<open>
|
||||
@{thm "refl"}
|
||||
\<close>
|
||||
ML\<open>@{thm "refl"}\<close>
|
||||
|
||||
section\<open>Request on instances\<close>
|
||||
|
||||
|
@ -167,34 +170,41 @@ value*\<open>filter (\<lambda>\<sigma>. A.x \<sigma> > 5) @{A-instances}\<close>
|
|||
section\<open>Limitations\<close>
|
||||
|
||||
text\<open>There are still some limitations.
|
||||
The terms passed as arguments to the TA are not simplified and their evaluation fails:
|
||||
The terms passed as arguments to a TA are not simplified \<open>before\<close> expansion
|
||||
and their evaluation therefore fails:
|
||||
\<close>
|
||||
(* Error:
|
||||
value*\<open>@{thm (''HOL.re'' @ ''fl'')}\<close>
|
||||
value*\<open>@{thm ''HOL.refl''} = @{thm (''HOL.re'' @ ''fl'')}\<close>*)
|
||||
|
||||
value\<open>@{thm (''HOL.re'' @ ''fl'')}\<close>
|
||||
value-assert-error\<open>@{thm (''HOL.re'' @ ''fl'')}\<close>
|
||||
\<open>wrong term format: must be string constant\<close>
|
||||
value-assert-error\<open>@{thm ''HOL.refl''} = @{thm (''HOL.re'' @ ''fl'')}\<close>
|
||||
\<open>wrong term format: must be string constant\<close>
|
||||
|
||||
text\<open>The type checking is unaware that a class is subclass of another one.
|
||||
The @{doc_class "G"} is a subclass of the class @{doc_class "C"}, but one can not use it
|
||||
to update the instance @{docitem \<open>xcv4\<close>}:
|
||||
\<close>
|
||||
(* Error:
|
||||
update_instance*[xcv4::F, b+="{(@{A ''xcv3''},@{G ''xcv5''})}"]*)
|
||||
|
||||
update_instance-assert-error[xcv4::F, b+="{(@{A ''xcv3''},@{G ''xcv5''})}"]
|
||||
\<open>type of attribute: Conceptual.F.b does not fit to term\<close>
|
||||
|
||||
|
||||
section\<open>\<^theory_text>\<open>assert*\<close>-Annotated assertion-commands\<close>
|
||||
|
||||
text\<open>The \<^emph>\<open>assert*\<close>-command allows for logical statements to be checked in the global context.
|
||||
It uses the same implementation as the \<^emph>\<open>value*\<close>-command and has the same limitations.
|
||||
Recall that it uses the same mechanism as the \<^emph>\<open>value*\<close>-command but requires that the evaluation
|
||||
reduces the argument term to true.
|
||||
Consequently, it has the same limitations as \<^emph>\<open>value*\<close>.
|
||||
\<close>
|
||||
|
||||
text\<open>Using the ontology defined in \<^theory>\<open>Isabelle_DOF-Unit-Tests.Concept_High_Level_Invariants\<close>
|
||||
we can check logical statements:\<close>
|
||||
(*
|
||||
term*\<open>authored_by @{introduction \<open>introduction2\<close>} = authored_by @{introduction \<open>introduction3\<close>}\<close>
|
||||
assert*\<open>authored_by @{introduction \<open>introduction2\<close>} = authored_by @{introduction \<open>introduction3\<close>}\<close>
|
||||
assert*\<open>\<not>(authored_by @{introduction \<open>introduction2\<close>}
|
||||
= authored_by @{introduction \<open>introduction4\<close>})\<close>
|
||||
*)
|
||||
|
||||
term*\<open>authored_by @{Introduction \<open>introduction2\<close>} = authored_by @{Introduction \<open>introduction3\<close>}\<close>
|
||||
assert*\<open>authored_by @{Introduction \<open>introduction2\<close>} = authored_by @{Introduction \<open>introduction3\<close>}\<close>
|
||||
assert*\<open>\<not>(authored_by @{Introduction \<open>introduction2\<close>}
|
||||
= authored_by @{Introduction \<open>introduction4\<close>})\<close>
|
||||
|
||||
text\<open>Assertions must be boolean expressions, so the following assertion triggers an error:\<close>
|
||||
(* Error:
|
||||
assert*\<open>@{introduction \<open>introduction2\<close>}\<close>*)
|
||||
|
@ -202,19 +212,19 @@ assert*\<open>@{introduction \<open>introduction2\<close>}\<close>*)
|
|||
text\<open>Assertions must be true, hence the error:\<close>
|
||||
(* Error:
|
||||
assert*\<open>{@{author \<open>curry\<close>}} = {@{author \<open>church\<close>}}\<close>*)
|
||||
(*
|
||||
|
||||
term*\<open>property @{result \<open>resultProof\<close>} = property @{result \<open>resultProof2\<close>}\<close>
|
||||
assert*[assertionA::A]\<open>\<not> property @{result \<open>resultProof\<close>} = property @{result \<open>resultProof2\<close>}\<close>
|
||||
|
||||
text-macro*[assertionAA::A]\<open>@{A [display] "assertionA"}\<close>
|
||||
text\<open>... and here we reference @{A [display] \<open>assertionA\<close>}.\<close>
|
||||
text*[assertionAA::A]\<open>@{A "assertionA"}\<close>
|
||||
text\<open>... and here we reference @{A \<open>assertionA\<close>}.\<close>
|
||||
|
||||
assert*\<open>evidence @{result \<open>resultProof\<close>} = evidence @{result \<open>resultProof2\<close>}\<close>
|
||||
*)
|
||||
|
||||
text\<open>The optional evaluator of \<open>value*\<close> and \<open>assert*\<close> must be specified after the meta arguments:\<close>
|
||||
value* [optional_test_A::A, x=6] [nbe] \<open>filter (\<lambda>\<sigma>. A.x \<sigma> > 5) @{A-instances}\<close>
|
||||
(*
|
||||
|
||||
assert* [resultProof3::result, evidence = "proof", property="[@{thm \<open>HOL.sym\<close>}]"] [nbe]
|
||||
\<open>evidence @{result \<open>resultProof3\<close>} = evidence @{result \<open>resultProof2\<close>}\<close>
|
||||
*)
|
||||
|
||||
end
|
|
@ -1,20 +1,30 @@
|
|||
(*<*)
|
||||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
* This program can be redistributed and/or modified under the terms
|
||||
* of the 2-clause BSD-style license.
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
theory "Isabelle_DOF-Unit-Tests_document"
|
||||
imports
|
||||
"Isabelle_DOF.technical_report"
|
||||
"Isabelle_DOF-Ontologies.CENELEC_50128"
|
||||
(* "Isabelle_DOF-Ontologies.CENELEC_50128" where do we use this - bu *)
|
||||
|
||||
begin
|
||||
|
||||
use_template "scrreprt-modern"
|
||||
use_ontology "technical_report" and "Isabelle_DOF-Ontologies.CENELEC_50128"
|
||||
|
||||
use_ontology "technical_report" (* and "Isabelle_DOF-Ontologies.CENELEC_50128" *)
|
||||
(*>*)
|
||||
(* BUG: Invariant checking should not go across sessions boundaries.
|
||||
|
||||
title*[title::title] \<open>Isabelle/DOF\<close>
|
||||
subtitle*[subtitle::subtitle]\<open>Unit Tests\<close>
|
||||
*)
|
||||
title*[title::title] \<open>The Isabelle/DOF Implementation\<close>
|
||||
subtitle*[subtitle::subtitle]\<open>The Unit-Test Suite\<close>
|
||||
|
||||
(*<*)
|
||||
end
|
||||
(*>*)
|
||||
|
|
|
@ -11,6 +11,9 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
chapter\<open>LaTeX Generation Tests\<close>
|
||||
|
||||
|
||||
theory Latex_Tests
|
||||
imports "TestKit"
|
||||
"Isabelle_DOF-Unit-Tests_document"
|
||||
|
@ -48,7 +51,7 @@ text-[asd::B]
|
|||
\textbf{TEST}
|
||||
@{cartouche [display]
|
||||
\<open>text*[dfgdfg::B]
|
||||
\<open> Lorem ipsum ... @{thm refl} Frédéric \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>
|
||||
\<open> Lorem ipsum ... @{thm refl} Frederic \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>
|
||||
\<close>}
|
||||
\<close>
|
||||
|
||||
|
@ -57,10 +60,9 @@ text-latex\<open>... and here is its application macro expansion:
|
|||
\textbf{TEST}
|
||||
@{cartouche [display]
|
||||
\<open>text*[dfgdfg::B]
|
||||
\<open> Lorem ipsum ... @{thm refl} Frédéric \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>
|
||||
\<open> Lorem ipsum ... @{thm refl} Fr\'ed\'eric \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>
|
||||
\<close>}\<close>
|
||||
|
||||
(*<*)
|
||||
text-latex\<open> \<^theory_text>\<open>definition df = ...
|
||||
\<close>
|
||||
@{ML [display] \<open> let val x = 3 + 4 in true end
|
||||
|
@ -70,12 +72,11 @@ text-latex\<open> \<^theory_text>\<open>definition df = ...
|
|||
\<close>}
|
||||
|
||||
@{verbatim [display] \<open> Lorem ipsum ... @{thm refl}
|
||||
Frédéric \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>}
|
||||
Fr\'ed\'eric \textbf{TEST} \verb+sdf+ \<open>dfgdfg\<close> \<close>}
|
||||
@{theory_text [display] \<open>definition df = ... \<lambda>x.
|
||||
\<close>}
|
||||
@{cartouche [display] \<open> @{figure "cfgdfg"}\<close>} \<close>
|
||||
|
||||
(*>*)
|
||||
text\<open>Final Status:\<close>
|
||||
print_doc_items
|
||||
print_doc_classes
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
session "Isabelle_DOF-Unit-Tests" = "Isabelle_DOF-Ontologies" +
|
||||
options [document = pdf, document_output = "output", document_build = dof]
|
||||
options [document = pdf, document_output = "output", document_build = dof, document_variants = "document:overview=-proof,-ML,-unimportant"]
|
||||
theories
|
||||
"TestKit"
|
||||
"Latex_Tests"
|
||||
|
@ -7,9 +7,10 @@ session "Isabelle_DOF-Unit-Tests" = "Isabelle_DOF-Ontologies" +
|
|||
"Concept_Example_Low_Level_Invariant"
|
||||
"Concept_High_Level_Invariants"
|
||||
"Concept_MonitorTest1"
|
||||
"Concept_MonitorTest2"
|
||||
"Concept_TermAntiquotations"
|
||||
"Concept_TermEvaluation"
|
||||
"Attributes"
|
||||
"Evaluation"
|
||||
"AssnsLemmaThmEtc"
|
||||
"Ontology_Matching_Example"
|
||||
"Cenelec_Test"
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
chapter\<open>The Isabelle/DOF TestKit\<close>
|
||||
|
||||
theory
|
||||
TestKit
|
||||
imports
|
||||
|
@ -24,8 +26,7 @@ keywords "text-" "text-latex" :: document_body
|
|||
|
||||
begin
|
||||
|
||||
section\<open>Testing Commands (exec-catch-verify - versions of std commands)\<close>
|
||||
|
||||
section\<open>Testing Commands (exec-catch-verify - versions of DOF commands)\<close>
|
||||
|
||||
ML\<open>
|
||||
|
||||
|
@ -65,16 +66,14 @@ fun gen_enriched_document_command2 name {body} cid_transform attr_transform mark
|
|||
This implies that several text block can be
|
||||
processed in parallel in a future, as long
|
||||
as they are associated to one meta arg.\<close>
|
||||
|
||||
(* ... generating the level-attribute syntax *)
|
||||
in
|
||||
(if meta_args = ODL_Meta_Args_Parser.empty_meta_args
|
||||
val handle_margs_opt = (if meta_args = ODL_Meta_Args_Parser.empty_meta_args
|
||||
then I
|
||||
else
|
||||
Value_Command.Docitem_Parser.create_and_check_docitem
|
||||
{is_monitor = false} {is_inline = false} {define = true}
|
||||
oid pos (cid_transform cid_pos) (attr_transform doc_attrs))
|
||||
#> (fn thy => (app (check_n_tex_text thy) toks_list; thy))
|
||||
(* ... generating the level-attribute syntax *)
|
||||
in handle_margs_opt #> (fn thy => (app (check_n_tex_text thy) toks_list; thy))
|
||||
end;
|
||||
|
||||
val _ =
|
||||
|
@ -99,15 +98,14 @@ fun output_document2 state markdown txt =
|
|||
in Document_Output.output_document ctxt markdown txt end;
|
||||
|
||||
fun document_command2 markdown (loc, txt) =
|
||||
Toplevel.keep (fn state =>
|
||||
(case loc of
|
||||
let fun doc2 state = (case loc of
|
||||
NONE => ignore (output_document2 state markdown txt)
|
||||
| SOME (_, pos) =>(ISA_core.err
|
||||
"Illegal target specification -- not a theory context"
|
||||
pos)))
|
||||
o
|
||||
Toplevel.present_local_theory loc (fn state => (output_document2 state markdown txt));
|
||||
|
||||
pos))
|
||||
fun out2 state = output_document2 state markdown txt
|
||||
in Toplevel.keep doc2 o Toplevel.present_local_theory loc out2
|
||||
end
|
||||
|
||||
fun gen_enriched_document_command3 assert name body trans at md (margs, src_list) thy
|
||||
= (gen_enriched_document_command2 name body trans at md (margs, src_list) thy)
|
||||
|
@ -146,19 +144,12 @@ val _ =
|
|||
then (writeln ("Correct error: "^msg^": reported.");thy)
|
||||
else error"Wrong error reported")
|
||||
in Outer_Syntax.command \<^command_keyword>\<open>declare_reference-assert-error\<close>
|
||||
"declare document reference"
|
||||
(ODL_Meta_Args_Parser.attributes -- Parse.document_source
|
||||
>> (Toplevel.theory o create_and_check_docitem))
|
||||
"declare document reference"
|
||||
(ODL_Meta_Args_Parser.attributes -- Parse.document_source
|
||||
>> (Toplevel.theory o create_and_check_docitem))
|
||||
end;
|
||||
|
||||
|
||||
(* fun pass_trans_to_value_cmd args ((name, modes), t) trans =
|
||||
let val pos = Toplevel.pos_of trans
|
||||
in
|
||||
trans |> Toplevel.theory (value_cmd {assert=false} args name modes t @{here})
|
||||
end
|
||||
*)
|
||||
|
||||
val _ =
|
||||
let fun pass_trans_to_value_cmd (args, (((name, modes), t),src)) trans =
|
||||
(Value_Command.value_cmd {assert=false} args name modes t @{here} trans
|
||||
|
@ -179,15 +170,11 @@ val _ =
|
|||
Outer_Syntax.command ("text-latex", \<^here>) "formal comment (primary style)"
|
||||
(Parse.opt_target -- Parse.document_source >> document_command2 {markdown = true});
|
||||
|
||||
|
||||
|
||||
|
||||
\<close>
|
||||
|
||||
(* auto-tests *)
|
||||
(* a little auto-test *)
|
||||
text-latex\<open>dfg\<close>
|
||||
|
||||
text-assert-error[aaaa::A]\<open> @{A \<open>sdf\<close>}\<close>\<open>reference ontologically inconsistent\<close>
|
||||
|
||||
end
|
||||
(*>*)
|
||||
|
|
|
@ -16,12 +16,12 @@ session "Isabelle_DOF" (AFP) = "Functional-Automata" +
|
|||
"thys/Isa_DOF"
|
||||
"thys/Isa_COL"
|
||||
theories [document = true]
|
||||
"thys/manual/01_Introduction"
|
||||
"thys/manual/00_Frontmatter"
|
||||
"thys/manual/02_Background"
|
||||
"thys/manual/05_Implementation"
|
||||
"thys/manual/03_GuidedTour"
|
||||
"thys/manual/04_RefMan"
|
||||
"thys/manual/M_00_Frontmatter"
|
||||
"thys/manual/M_01_Introduction"
|
||||
"thys/manual/M_02_Background"
|
||||
"thys/manual/M_03_GuidedTour"
|
||||
"thys/manual/M_04_RefMan"
|
||||
"thys/manual/M_05_Implementation"
|
||||
"thys/manual/Isabelle_DOF-Manual"
|
||||
document_files
|
||||
"root.bib"
|
||||
|
@ -46,6 +46,11 @@ session "Isabelle_DOF" (AFP) = "Functional-Automata" +
|
|||
"figures/definition-use-CSP-pdf.png"
|
||||
"figures/definition-use-CSP.png"
|
||||
"figures/MyCommentedIsabelle.png"
|
||||
"figures/doc-mod-generic.png"
|
||||
"figures/doc-mod-isar.png"
|
||||
"figures/doc-mod-onto-docinst.png"
|
||||
"figures/doc-mod-DOF.png"
|
||||
"figures/doc-mod-term-aq.png"
|
||||
export_classpath
|
||||
|
||||
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
\input{00_Frontmatter.tex}
|
||||
\input{01_Introduction.tex}
|
||||
\input{02_Background.tex}
|
||||
\input{03_GuidedTour.tex}
|
||||
\input{04_RefMan.tex}
|
||||
\input{05_Implementation.tex}
|
||||
\input{M_00_Frontmatter.tex}
|
||||
\input{M_01_Introduction.tex}
|
||||
\input{M_02_Background.tex}
|
||||
\input{M_03_GuidedTour.tex}
|
||||
\input{M_04_RefMan.tex}
|
||||
\input{M_05_Implementation.tex}
|
||||
\input{Isabelle_DOF-Manual.tex}
|
||||
|
|
After Width: | Height: | Size: 69 KiB |
After Width: | Height: | Size: 56 KiB |
After Width: | Height: | Size: 59 KiB |
After Width: | Height: | Size: 62 KiB |
After Width: | Height: | Size: 46 KiB |
|
@ -73,6 +73,33 @@
|
|||
public = {yes}
|
||||
}
|
||||
|
||||
@InCollection{ brucker.ea:deep-ontologies:2023,
|
||||
author = {Achim D. Brucker and Idir Ait-Sadoune and Nicolas Meric and Burkhart Wolff},
|
||||
booktitle = {9th International Conference on Rigorous State-Based Methods (ABZ 2023)},
|
||||
language = {USenglish},
|
||||
publisher = {Springer-Verlag},
|
||||
address = {Heidelberg},
|
||||
series = {Lecture Notes in Computer Science},
|
||||
number = {XXXXX},
|
||||
title = {{U}sing {D}eep {O}ntologies in {F}ormal {S}oftware {E}ngineering},
|
||||
year = {2023},
|
||||
abstract = {Isabelle/DOF is an ontology framework on top of Isabelle Isabelle/DOF allows for the
|
||||
formal development of ontologies as well as continuous conformity-checking of
|
||||
integrated documents annotated by ontological data. An integrated document may
|
||||
contain text, code, definitions, proofs and user-programmed constructs supporting a
|
||||
wide range of Formal Methods. Isabelle/DOF is designed to leverage traceability in
|
||||
integrated documents by supporting navigation in Isabelle’s IDE as well as the
|
||||
document generation process.
|
||||
In this paper we extend Isabelle/DOF with annotations of terms, a pervasive
|
||||
data-structure underlying Isabelle used to syntactically rep- resent expressions
|
||||
and formulas. Rather than introducing an own pro- gramming language for meta-data,
|
||||
we use Higher-order Logic (HOL) for expressions, data-constraints, ontological
|
||||
invariants, and queries via code-generation and reflection. This allows both for
|
||||
powerful query languages and logical reasoning over ontologies in, for example,
|
||||
ontological mappings. Our application examples cover documents targeting formal
|
||||
certifications such as CENELEC, Common Criteria, etc.}
|
||||
}
|
||||
|
||||
@InCollection{ brucker.ea:isabelle-ontologies:2018,
|
||||
abstract = {While Isabelle is mostly known as part of Isabelle/HOL (an
|
||||
interactive theorem prover), it actually provides a
|
||||
|
|
|
@ -65,7 +65,7 @@
|
|||
\ifcsname isaDof.\commandkey{type}\endcsname%
|
||||
\csname isaDof.\commandkey{type}\endcsname%
|
||||
[label=\commandkey{label},\commandkey{args}]{#1}%
|
||||
\else\relax\fi%
|
||||
\else%
|
||||
\ifcsname isaDof.\commandkey{env}.\commandkey{type}\endcsname%
|
||||
\csname isaDof.\commandkey{env}.\commandkey{type}\endcsname%
|
||||
[label=\commandkey{label},\commandkey{args}]{#1}%
|
||||
|
@ -81,6 +81,7 @@
|
|||
definition for "\commandkey{env}" available either.}%
|
||||
\fi%
|
||||
\fi%
|
||||
\fi%
|
||||
}
|
||||
% end: generic dispatcher
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
@ -154,10 +155,14 @@
|
|||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% begin: label and ref
|
||||
\newisadof{label}[label=,type=][1]{\label{#1}}
|
||||
\newisadof{ref}[label=,type=][1]{\autoref{#1}}
|
||||
\newisadof{macroDef}[label=,type=][1]{MMM \label{#1}} %% place_holder
|
||||
\newisadof{macroExp}[label=,type=][1]{MMM \autoref{#1}} %% place_holder
|
||||
\newkeycommand\iisaDoflabel[label=,type=][1]{\label{#1}}
|
||||
\def\isaDoflabel{\iisaDoflabel}
|
||||
\newkeycommand\iisaDofref[label=,type=][1]{\autoref{#1}}
|
||||
\def\isaDofref{\iisaDofref}
|
||||
\newkeycommand\iisaDofmacroDef[label=,type=][1]{MMM \label{#1}} %% place_holder
|
||||
\def\isaDofmacroDef{\iisaDofmacroDef}
|
||||
\newkeycommand\iisaDofmacroExp[label=,type=][1]{MMM \autoref{#1}} %% place_holder
|
||||
\def\isaDofmacroExp{\iisaDofmacroExp}
|
||||
% end: label and ref
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
|
@ -168,4 +173,6 @@
|
|||
|
||||
% notation
|
||||
\newcommand{\isactrltermUNDERSCORE}{\isakeywordcontrol{term{\isacharunderscore}}}
|
||||
\newcommand{\isactrlvalueUNDERSCORE}{\isakeywordcontrol{value{\isacharunderscore}}}
|
||||
\newcommand{\isactrlvalueUNDERSCORE}{\isakeywordcontrol{value{\isacharunderscore}}}
|
||||
\newcommand{\isasymdoublequote}{\texttt{\upshape"}}
|
||||
\newcommand{\isasymquote}{\texttt{\upshape'}}
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
%% Copyright (C) 2020 The University of Paris-Saclay
|
||||
%% 2020 The University of Exeter
|
||||
%%
|
||||
%% License:
|
||||
%% This program can be redistributed and/or modified under the terms
|
||||
%% of the LaTeX Project Public License Distributed from CTAN
|
||||
%% archives in directory macros/latex/base/lppl.txt; either
|
||||
%% version 1.3c of the License, or (at your option) any later version.
|
||||
%% OR
|
||||
%% The 2-clause BSD-style license.
|
||||
%%
|
||||
%% SPDX-License-Identifier: LPPL-1.3c+ OR BSD-2-Clause
|
||||
|
||||
\NeedsTeXFormat{LaTeX2e}\relax
|
||||
\ProvidesPackage{DOF-scholarly_paper-thm}
|
||||
[00/00/0000 Document-Type Support Framework for Isabelle (LNCS).]
|
||||
|
||||
\RequirePackage{amsthm}
|
||||
|
||||
\newtheorem{example}{Example}
|
||||
%\newtheorem{assertion}{Assumption} %% Hack
|
||||
\newtheorem{assumption}{Assumption}
|
||||
\newtheorem{definition}{Definition}
|
||||
\newtheorem{theorem}{Theorem}
|
||||
|
||||
|
||||
|
|
@ -31,13 +31,13 @@
|
|||
\@ifclassloaded{llncs}%
|
||||
{}%
|
||||
{%
|
||||
\RequirePackage{amsthm}
|
||||
\@ifclassloaded{scrartcl}%
|
||||
{%
|
||||
\newcommand{\institute}[1]{}%
|
||||
\newcommand{\inst}[1]{}%
|
||||
\newcommand{\orcidID}[1]{}%
|
||||
\newcommand{\email}[1]{}%
|
||||
\RequirePackage{DOF-scholarly_paper-thm}%
|
||||
}%
|
||||
{%
|
||||
\@ifclassloaded{lipics-v2021}%
|
||||
|
@ -165,18 +165,70 @@
|
|||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
\newtheorem{axiom}{Axiom}
|
||||
\newtheorem{ruleX}{Rule} % apparent name-clash with s th from libraries...
|
||||
\newtheorem{assertion}{Assertion}
|
||||
%\RequirePackage{amsthm}
|
||||
%\newtheorem{example}{Example}
|
||||
%\newtheorem{assumption}{Assumption}
|
||||
%\newtheorem{definition}{Definition}
|
||||
%\newtheorem{theorem}{Theorem}
|
||||
\newtheorem{defn}{Definition}
|
||||
\newcommand{\defnautorefname}{Definition}
|
||||
\NewEnviron{isamarkupDefinition*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{axm}{Axiom}
|
||||
\newcommand{\axmautorefname}{Axiom}
|
||||
\NewEnviron{isamarkupAxiom*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{theom}{Theorem}
|
||||
\newcommand{\theomautorefname}{Theorem}
|
||||
\NewEnviron{isamarkupTheorem*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{lemm}{Lemma}
|
||||
\newcommand{\lemmautorefname}{Lemma}
|
||||
\NewEnviron{isamarkupLemma*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{corr}{Corollary}
|
||||
\newcommand{\corrautorefname}{Corollary}
|
||||
\NewEnviron{isamarkupCorollary*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{prpo}{Proposition}
|
||||
\newcommand{\prpoautorefname}{Proposition}
|
||||
\NewEnviron{isamarkupProposition*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{rulE}{Rule}
|
||||
\newcommand{\rulEautorefname}{Rule}
|
||||
\NewEnviron{isamarkupRule*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{assn}{Assertion}
|
||||
\newcommand{\assnautorefname}{Assertion}
|
||||
\NewEnviron{isamarkupAssertion*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{hypt}{Hypothesis}
|
||||
\newcommand{\hyptautorefname}{Hypothesis}
|
||||
\NewEnviron{isamarkupHypothesis*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{assm}{Assumption}
|
||||
\newcommand{\assmautorefname}{Assumption}
|
||||
\NewEnviron{isamarkupAssumption*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{prms}{Premise}
|
||||
\newcommand{\prmsautorefname}{Premise}
|
||||
\NewEnviron{isamarkupPremise*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{cons}{Consequence}
|
||||
\newcommand{\consautorefname}{Consequence}
|
||||
\NewEnviron{isamarkupConsequence*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{conc_stmt}{Conclusion}
|
||||
\newcommand{\concstmtautorefname}{Conclusion}
|
||||
\NewEnviron{isamarkupConclusion*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{prf_stmt}{Proof}
|
||||
\newcommand{\prfstmtautorefname}{Proof}
|
||||
\NewEnviron{isamarkupProof*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{expl_stmt}{Example}
|
||||
\newcommand{\explstmtautorefname}{Example}
|
||||
\NewEnviron{isamarkupExample*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{rmrk}{Remark}
|
||||
\newcommand{\rmrkautorefname}{Remark}
|
||||
\NewEnviron{isamarkupRemark*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{notn}{Notation}
|
||||
\newcommand{\notnautorefname}{Notation}
|
||||
\NewEnviron{isamarkupNotation*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
\newtheorem{tmgy}{Terminology}
|
||||
\newcommand{\tmgyautorefname}{Terminology}
|
||||
\NewEnviron{isamarkupTerminology*}[1][]{\isaDof[env={text},#1,type={scholarly_paper.math_content}]{\BODY}}
|
||||
|
||||
% \newcommand{\formalMathStmt[label, mcc, ]
|
||||
% end: scholarly_paper.abstract
|
||||
% | "rule" | "assn" | "assm" | "expl" | rem | "notation" | "terminology"
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\newisadof{text.scholarly_paper.math_content}%
|
||||
[label=,type=%
|
||||
, scholarly_paper.math_content.short_name ={} % {} or \relax
|
||||
, scholarly_paper.math_content.mcc = % "def" | "axm" | "thm" | "lem" | "prop" | "rule" | "assn" | "assm"
|
||||
, scholarly_paper.math_content.short_name ={}%
|
||||
, scholarly_paper.math_content.mcc = %
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
|
@ -189,317 +241,20 @@
|
|||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{defn}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{definition} \label{\commandkey{label}} #1 \end{definition} }
|
||||
{\begin{definition} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{definition}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{axm}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{axiom} \label{\commandkey{label}} #1 \end{axiom} }
|
||||
{\begin{axiom} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{axiom}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{thm}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{theorem} \label{\commandkey{label}} #1 \end{theorem} }
|
||||
{\begin{theorem} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{theorem}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{lem}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{lemma} \label{\commandkey{label}} #1 \end{lemma} }
|
||||
{\begin{lemma} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{lemma}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{expl}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{example} \label{\commandkey{label}} #1 \end{example} }
|
||||
{\begin{example} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{example}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{cor}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{corrollary} \label{\commandkey{label}} #1 \end{corrollary} }
|
||||
{\begin{corrollary} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{corrollary}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{prop}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{property} \label{\commandkey{label}} #1 \end{property} }
|
||||
{\begin{property} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{property}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{rule}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{ruleX} \label{\commandkey{label}} #1 \end{ruleX} }
|
||||
{\begin{ruleX} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{ruleX}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{rem}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{remark} \label{\commandkey{label}} #1 \end{remark} }
|
||||
{\begin{remark} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{remark}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{assn}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{assertion} \label{\commandkey{label}} #1 \end{assertion} }
|
||||
{\begin{assertion} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{assertion}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{assm}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{assumption} \label{\commandkey{label}} #1 \end{assumption} }
|
||||
{\begin{assumption} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{assumption}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{notation}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{notation} \label{\commandkey{label}} #1 \end{notation} }
|
||||
{\begin{notation} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{notation}}
|
||||
}%
|
||||
}%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{terminology}}
|
||||
{%
|
||||
{\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{terminology} \label{\commandkey{label}} #1 \end{terminology} }
|
||||
{\begin{terminology} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{terminology}}
|
||||
}%
|
||||
}%
|
||||
{%
|
||||
\typeout{Internal error: enumeration out of sync with ontology scholarly-paper.}
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{%
|
||||
\begin{\commandkey{scholarly_paper.math_content.mcc}}\label{\commandkey{label}}
|
||||
#1
|
||||
\end{\commandkey{scholarly_paper.math_content.mcc}}
|
||||
}{%
|
||||
\begin{\commandkey{scholarly_paper.math_content.mcc}}[\commandkey{scholarly_paper.math_content.short_name}]\label{\commandkey{label}}
|
||||
#1
|
||||
\end{\commandkey{scholarly_paper.math_content.mcc}}
|
||||
}
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
|
||||
|
||||
% end: scholarly_paper.math_content
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% Besser: Einfach durchreichen wg Vererbung !
|
||||
\newisadof{text.scholarly_paper.math_example}%
|
||||
[label=,type=%
|
||||
, scholarly_paper.math_content.short_name =%
|
||||
, scholarly_paper.math_content.mcc =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.technical.status =%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{expl}}{
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{example} \label{\commandkey{label}} #1 \end{example} }
|
||||
{\begin{example} [\commandkey{scholarly_paper.math_content.short_name}]
|
||||
\label{\commandkey{label}} #1
|
||||
\end{example}}
|
||||
}{%
|
||||
#1%
|
||||
}%
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\NewEnviron{isamarkupDefinition*}[1][]{\isaDof[env={text},#1]{\BODY}}
|
||||
\newisadof{text.scholarly_paper.definition}%
|
||||
[label=,type=%
|
||||
, scholarly_paper.math_content.short_name =%
|
||||
, scholarly_paper.math_content.mcc =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.technical.status =%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{defn}}{
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{definition} \label{\commandkey{label}} #1 \end{definition} }
|
||||
{\begin{definition} [\commandkey{scholarly_paper.math_content.short_name}] \label{\commandkey{label}} #1 \end{definition}}
|
||||
}{%
|
||||
#1%
|
||||
}%
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
% An experiment in inheritance of the default behaviour.
|
||||
% \newisadof{text.scholarly_paper.definition}%
|
||||
% [label=,type=%
|
||||
% , scholarly_paper.math_content.short_name =%
|
||||
% , scholarly_paper.math_content.mcc =%
|
||||
% , Isa_COL.text_element.level =%
|
||||
% , Isa_COL.text_element.referentiable =%
|
||||
% , Isa_COL.text_element.variants =%
|
||||
% , scholarly_paper.text_section.main_author =%
|
||||
% , scholarly_paper.text_section.fixme_list =%
|
||||
% , scholarly_paper.technical.definition_list =%
|
||||
% , scholarly_paper.technical.status =%
|
||||
% ]
|
||||
% [1]
|
||||
% {%
|
||||
% \cscommand{text.scholarly_paper.math_content}%
|
||||
% [label=\commandkey{label},type=\commandkey{type}%
|
||||
% , scholarly_paper.math_content.short_name =\commandkey{scholarly_paper.math_content.short_name}%
|
||||
% , scholarly_paper.math_content.mcc =\commandkey{scholarly_paper.math_content.mcc}%
|
||||
% , Isa_COL.text_element.level =\commandkey{Isa_COL.text_element.level}%
|
||||
% , Isa_COL.text_element.referentiable =\commandkey{Isa_COL.text_element.referentiable}%
|
||||
% , Isa_COL.text_element.variants =\commandkey{Isa_COL.text_element.variants}%
|
||||
% , scholarly_paper.text_section.main_author =\commandkey{scholarly_paper.text_section.main_author}%
|
||||
% , scholarly_paper.text_section.fixme_list =\commandkey{scholarly_paper.text_section.fixme_list}%
|
||||
% , scholarly_paper.technical.definition_list =\commandkey{scholarly_paper.technical.definition_list}%
|
||||
% , scholarly_paper.technical.status =\commandkey{scholarly_paper.technical.status}%
|
||||
% ]
|
||||
% {#1}%
|
||||
% }
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%% Besser: Einfach durchreichen wg Vererbung !
|
||||
\NewEnviron{isamarkupLemma*}[1][]{\isaDof[env={text.scholarly_paper.lemma},#1]{\BODY}}
|
||||
\newisadof{text.scholarly_paper.lemma}%
|
||||
[label=,type=%
|
||||
, scholarly_paper.math_content.short_name =%
|
||||
, scholarly_paper.math_content.mcc =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.technical.status =%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{lem}}{
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{lemma} \label{\commandkey{label}} #1 \end{lemma} }
|
||||
{\begin{lemma} [\commandkey{scholarly_paper.math_content.short_name}] \label{\commandkey{label}} #1 \end{lemma}}
|
||||
}{%
|
||||
#1%
|
||||
}%
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% Besser: Einfach durchreichen wg Vererbung !
|
||||
\NewEnviron{isamarkupTheorem*}[1][]{\isaDof[env={text.scholarly_paper.theorem},#1]{\BODY}}
|
||||
\newisadof{text.scholarly_paper.theorem}%
|
||||
[label=,type=%
|
||||
, scholarly_paper.math_content.short_name =%
|
||||
, scholarly_paper.math_content.mcc =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, Isa_COL.text_element.level =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.technical.status =%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.mcc}}{thm}}{
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.math_content.short_name}} {} }
|
||||
{\begin{theorem} \label{\commandkey{label}} #1 \end{theorem} }
|
||||
{\begin{theorem} [\commandkey{scholarly_paper.math_content.short_name}] \label{\commandkey{label}} #1 \end{theorem}}
|
||||
}{%
|
||||
#1%
|
||||
}%
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% Generic Example. Different inheritance hierachy.
|
||||
\newisadof{text.scholarly_paper.example}%
|
||||
[label=,type=%
|
||||
, Isa_COL.text_element.level =%
|
||||
, Isa_COL.text_element.referentiable =%
|
||||
, Isa_COL.text_element.variants =%
|
||||
, scholarly_paper.text_section.main_author =%
|
||||
, scholarly_paper.text_section.fixme_list =%
|
||||
, scholarly_paper.technical.definition_list =%
|
||||
, scholarly_paper.example.status =%
|
||||
, scholarly_paper.example.short_name =%
|
||||
]
|
||||
[1]
|
||||
{%
|
||||
\begin{isamarkuptext}%
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.example.status}}{semiformal}}{
|
||||
\ifthenelse{\equal{\commandkey{scholarly_paper.example.short_name}} {} }
|
||||
{\begin{example} \label{\commandkey{label}} #1 \end{example} }
|
||||
{\begin{example} [\commandkey{scholarly_paper.example.short_name}] %
|
||||
\label{\commandkey{label}} #1 %
|
||||
\end{example}} %
|
||||
}%
|
||||
{%
|
||||
#1%
|
||||
}%
|
||||
\end{isamarkuptext}%
|
||||
}
|
||||
|
||||
|
||||
%% Miscellaneous
|
||||
\usepackage{xspace}
|
||||
|
@ -507,3 +262,4 @@
|
|||
\newcommand{\eg}{e.\,g.\xspace}
|
||||
\newcommand{\etc}{etc}
|
||||
|
||||
|
||||
|
|
|
@ -11,12 +11,15 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*************************************************************************)
|
||||
|
||||
section\<open>An example ontology for scientific, MINT-oriented papers.\<close>
|
||||
chapter\<open>An example ontology for scientific, MINT-oriented papers.\<close>
|
||||
|
||||
theory scholarly_paper
|
||||
imports "Isabelle_DOF.Isa_COL"
|
||||
keywords "author*" "abstract*"
|
||||
"Definition*" "Lemma*" "Theorem*" :: document_body
|
||||
keywords "author*" "abstract*" :: document_body
|
||||
and "Proposition*" "Definition*" "Lemma*" "Theorem*" :: document_body
|
||||
and "Premise*" "Corollary*" "Consequence*" "Conclusion*" :: document_body
|
||||
and "Assumption*" "Hypothesis*" "Assertion*" :: document_body
|
||||
and "Proof*" "Example*" :: document_body
|
||||
begin
|
||||
|
||||
define_ontology "DOF-scholarly_paper.sty" "Writing academic publications."
|
||||
|
@ -25,7 +28,7 @@ define_ontology "DOF-scholarly_paper.sty" "Writing academic publications."
|
|||
text\<open>Scholarly Paper provides a number of standard text - elements for scientific papers.
|
||||
They were introduced in the following.\<close>
|
||||
|
||||
subsection\<open>General Paper Structuring Elements\<close>
|
||||
section\<open>General Paper Structuring Elements\<close>
|
||||
doc_class title =
|
||||
short_title :: "string option" <= "None"
|
||||
|
||||
|
@ -50,12 +53,12 @@ ML\<open>
|
|||
val _ =
|
||||
Monitor_Command_Parser.document_command \<^command_keyword>\<open>abstract*\<close> "Textual Definition"
|
||||
{markdown = true, body = true}
|
||||
(Onto_Macros.enriched_document_cmd_exp (SOME "abstract") []);
|
||||
(Onto_Macros.enriched_document_cmd_exp (SOME "abstract") []) (K(K I));
|
||||
|
||||
val _ =
|
||||
Monitor_Command_Parser.document_command \<^command_keyword>\<open>author*\<close> "Textual Definition"
|
||||
{markdown = true, body = true}
|
||||
(Onto_Macros.enriched_document_cmd_exp (SOME "author") []);
|
||||
(Onto_Macros.enriched_document_cmd_exp (SOME "author") []) (K(K I));
|
||||
\<close>
|
||||
|
||||
text\<open>Scholarly Paper is oriented towards the classical domains in science:
|
||||
|
@ -102,8 +105,6 @@ datatype sc_dom = math | info | natsc | eng
|
|||
*)
|
||||
|
||||
|
||||
subsection\<open>Introductions\<close>
|
||||
|
||||
doc_class introduction = text_section +
|
||||
comment :: string
|
||||
claims :: "thm list"
|
||||
|
@ -125,7 +126,7 @@ doc_class background = text_section +
|
|||
invariant L\<^sub>b\<^sub>a\<^sub>c\<^sub>k :: "(level \<sigma>) \<noteq> None \<and> the (level \<sigma>) > 0"
|
||||
|
||||
|
||||
subsection\<open>Technical Content and its Formats\<close>
|
||||
section\<open>Technical Content and Free-form Semi-formal Formats\<close>
|
||||
|
||||
datatype status = formal | semiformal | description
|
||||
|
||||
|
@ -158,38 +159,59 @@ doc_class example = text_section +
|
|||
short_name :: string <= "''''"
|
||||
invariant L\<^sub>e\<^sub>x\<^sub>a :: "(level \<sigma>) \<noteq> None \<and> the (level \<sigma>) > 0"
|
||||
|
||||
text\<open>The intended use for the \<open>doc_class\<close>es \<^verbatim>\<open>math_motivation\<close> (or \<^verbatim>\<open>math_mtv\<close> for short),
|
||||
\<^verbatim>\<open>math_explanation\<close> (or \<^verbatim>\<open>math_exp\<close> for short) and
|
||||
\<^verbatim>\<open>math_example\<close> (or \<^verbatim>\<open>math_ex\<close> for short)
|
||||
are \<^emph>\<open>informal\<close> descriptions of semi-formal definitions (by inheritance).
|
||||
Math-Examples can be made referentiable triggering explicit, numbered presentations.\<close>
|
||||
doc_class math_motivation = tc +
|
||||
referentiable :: bool <= False
|
||||
type_synonym math_mtv = math_motivation
|
||||
|
||||
doc_class math_explanation = tc +
|
||||
referentiable :: bool <= False
|
||||
type_synonym math_exp = math_explanation
|
||||
|
||||
|
||||
subsection\<open>Freeform Mathematical Content\<close>
|
||||
|
||||
text\<open>We follow in our enumeration referentiable mathematical content class the AMS style and its
|
||||
provided \<^emph>\<open>theorem environments\<close> (see \<^verbatim>\<open>texdoc amslatex\<close>). We add, however, the concepts
|
||||
\<^verbatim>\<open>axiom\<close>, \<^verbatim>\<open>rule\<close> and \<^verbatim>\<open>assertion\<close> to the list. A particular advantage of \<^verbatim>\<open>texdoc amslatex\<close> is
|
||||
that it is well-established and compatible with many LaTeX - styles.\<close>
|
||||
that it is well-established and compatible with many LaTeX - styles.
|
||||
|
||||
datatype math_content_class = "defn" | "axm" | "thm" | "lem" | "cor" | "prop"
|
||||
| "expl" | "rule" | "assn" | "assm"
|
||||
| rem | "notation" | "terminology"
|
||||
The names for thhe following key's are deliberate non-standard abbreviations in order to avoid
|
||||
future name clashes.\<close>
|
||||
|
||||
datatype math_content_class =
|
||||
"defn" \<comment>\<open>definition\<close>
|
||||
| "axm" \<comment>\<open>axiom\<close>
|
||||
| "theom" \<comment>\<open>theorem\<close>
|
||||
| "lemm" \<comment>\<open>lemma\<close>
|
||||
| "corr" \<comment>\<open>corollary\<close>
|
||||
| "prpo" \<comment>\<open>proposition\<close>
|
||||
| "rulE" \<comment>\<open>rule\<close>
|
||||
| "assn" \<comment>\<open>assertion\<close>
|
||||
| "hypt" \<comment>\<open>hypothesis\<close>
|
||||
| "assm" \<comment>\<open>assumption\<close>
|
||||
| "prms" \<comment>\<open>premise\<close>
|
||||
| "cons" \<comment>\<open>consequence\<close>
|
||||
| "conc_stmt" \<comment>\<open>math. conclusion\<close>
|
||||
| "prf_stmt" \<comment>\<open>math. proof\<close>
|
||||
| "expl_stmt" \<comment>\<open>math. example\<close>
|
||||
| "rmrk" \<comment>\<open>remark\<close>
|
||||
| "notn" \<comment>\<open>notation\<close>
|
||||
| "tmgy" \<comment>\<open>terminology\<close>
|
||||
|
||||
(*
|
||||
thm Theorem Italic
|
||||
cor Corollary Italic
|
||||
lem Lemma Italic
|
||||
prop Proposition
|
||||
defn Definition
|
||||
expl Example
|
||||
|
||||
rem Remark
|
||||
notation
|
||||
terminology
|
||||
*)
|
||||
text\<open>Instances of the \<open>doc_class\<close> \<^verbatim>\<open>math_content\<close> are by definition @{term "semiformal"}; they may
|
||||
be non-referential, but in this case they will not have a @{term "short_name"}.\<close>
|
||||
|
||||
doc_class math_content = tc +
|
||||
referentiable :: bool <= True
|
||||
referentiable :: bool <= False
|
||||
short_name :: string <= "''''"
|
||||
status :: status <= "semiformal"
|
||||
mcc :: "math_content_class" <= "thm"
|
||||
mcc :: "math_content_class" <= "theom"
|
||||
invariant s1 :: "\<not>referentiable \<sigma> \<longrightarrow> short_name \<sigma> = ''''"
|
||||
invariant s2 :: "technical.status \<sigma> = semiformal"
|
||||
type_synonym math_tc = math_content
|
||||
|
@ -205,66 +227,129 @@ Sub-classes can englobe instances such as:
|
|||
2) ...
|
||||
\<close>\<close>
|
||||
\<^item> semi-formal descriptions, which are free-form mathematical definitions on which finally
|
||||
an attribute with a formal Isabelle definition is attached.
|
||||
|
||||
\<close>
|
||||
an attribute with a formal Isabelle definition is attached. \<close>
|
||||
|
||||
|
||||
(* type qualification is a work around *)
|
||||
text\<open>Instances of the Free-form Content are Definition, Lemma, Assumption, Hypothesis, etc.
|
||||
The key class definitions are inspired by the AMS style, to which some target LaTeX's compile.\<close>
|
||||
|
||||
text\<open>The intended use for the \<open>doc_class\<close>es \<^verbatim>\<open>math_motivation\<close> (or \<^verbatim>\<open>math_mtv\<close> for short),
|
||||
\<^verbatim>\<open>math_explanation\<close> (or \<^verbatim>\<open>math_exp\<close> for short) and
|
||||
\<^verbatim>\<open>math_example\<close> (or \<^verbatim>\<open>math_ex\<close> for short)
|
||||
are \<^emph>\<open>informal\<close> descriptions of semi-formal definitions (by inheritance).
|
||||
Math-Examples can be made referentiable triggering explicit, numbered presentations.\<close>
|
||||
doc_class math_motivation = tc +
|
||||
referentiable :: bool <= False
|
||||
type_synonym math_mtv = math_motivation
|
||||
text\<open>A proposition (or: "sentence") is a central concept in philosophy of language and related
|
||||
fields, often characterized as the primary bearer of truth or falsity. Propositions are also often
|
||||
characterized as being the kind of thing that declarative sentences denote. \<close>
|
||||
|
||||
doc_class math_explanation = tc +
|
||||
referentiable :: bool <= False
|
||||
type_synonym math_exp = math_explanation
|
||||
|
||||
|
||||
text\<open>The intended use for the \<open>doc_class\<close> \<^verbatim>\<open>math_semiformal_statement\<close> (or \<^verbatim>\<open>math_sfs\<close> for short)
|
||||
are semi-formal mathematical content (definition, lemma, etc.). They are referentiable entities.
|
||||
They are NOT formal, i.e. Isabelle-checked formal content, but can be in close link to these.\<close>
|
||||
doc_class math_semiformal = math_content +
|
||||
doc_class "proposition" = math_content +
|
||||
referentiable :: bool <= True
|
||||
type_synonym math_sfc = math_semiformal
|
||||
|
||||
subsection\<open>Instances of the abstract classes Definition / Class / Lemma etc.\<close>
|
||||
|
||||
text\<open>The key class definitions are motivated by the AMS style.\<close>
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "prpo"
|
||||
invariant d0 :: "mcc \<sigma> = prpo" (* can not be changed anymore. *)
|
||||
|
||||
text\<open>A definition is used to give a precise meaning to a new term, by describing a
|
||||
condition which unambiguously qualifies what a mathematical term is and is not. Definitions and
|
||||
axioms form the basis on which all of modern mathematics is to be constructed.\<close>
|
||||
doc_class "definition" = math_content +
|
||||
referentiable :: bool <= True
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "defn"
|
||||
invariant d1 :: "mcc \<sigma> = defn" (* can not be changed anymore. *)
|
||||
|
||||
doc_class "axiom" = math_content +
|
||||
referentiable :: bool <= True
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "axm"
|
||||
invariant d2 :: "mcc \<sigma> = axm" (* can not be changed anymore. *)
|
||||
|
||||
text\<open>A lemma (plural lemmas or lemmata) is a generally minor, proven proposition which is used as
|
||||
a stepping stone to a larger result. For that reason, it is also known as a "helping theorem" or an
|
||||
"auxiliary theorem". In many cases, a lemma derives its importance from the theorem it aims to prove.\<close>
|
||||
doc_class "lemma" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "lemm"
|
||||
invariant d3 :: "mcc \<sigma> = lemm"
|
||||
|
||||
doc_class "theorem" = math_content +
|
||||
referentiable :: bool <= True
|
||||
mcc :: "math_content_class" <= "thm"
|
||||
invariant d2 :: "mcc \<sigma> = thm"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "theom"
|
||||
invariant d4 :: "mcc \<sigma> = theom"
|
||||
|
||||
doc_class "lemma" = math_content +
|
||||
text\<open>A corollary is a theorem of less importance which can be readily deduced from a previous,
|
||||
more notable lemma or theorem. A corollary could, for instance, be a proposition which is incidentally
|
||||
proved while proving another proposition.\<close>
|
||||
doc_class "corollary" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
mcc :: "math_content_class" <= "lem"
|
||||
invariant d3 :: "mcc \<sigma> = lem"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "corr"
|
||||
invariant d5 :: "mcc \<sigma> = corr"
|
||||
|
||||
doc_class "corollary" = math_content +
|
||||
|
||||
text\<open>A premise or premiss[a] is a proposition — a true or false declarative statement—
|
||||
used in an argument to prove the truth of another proposition called the conclusion.\<close>
|
||||
doc_class "premise" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
mcc :: "math_content_class" <= "cor"
|
||||
invariant d4 :: "mcc \<sigma> = thm"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "prms"
|
||||
invariant d6 :: "mcc \<sigma> = prms"
|
||||
|
||||
doc_class "math_example" = math_content +
|
||||
text\<open>A consequence describes the relationship between statements that hold true when one statement
|
||||
logically follows from one or more statements. A valid logical argument is one in which the
|
||||
conclusion is entailed by the premises, because the conclusion is the consequence of the premises.
|
||||
The philosophical analysis of logical consequence involves the questions: In what sense does a
|
||||
conclusion follow from its premises?\<close>
|
||||
doc_class "consequence" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
mcc :: "math_content_class" <= "expl"
|
||||
invariant d5 :: "mcc \<sigma> = expl"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "cons"
|
||||
invariant d7 :: "mcc \<sigma> = cons"
|
||||
|
||||
doc_class "conclusion_stmt" = math_content + \<comment> \<open>not to confuse with a section element: Conclusion\<close>
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "conc_stmt"
|
||||
invariant d8 :: "mcc \<sigma> = conc_stmt"
|
||||
|
||||
text\<open>An assertion is a statement that asserts that a certain premise is true.\<close>
|
||||
doc_class "assertion" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "assn"
|
||||
invariant d9 :: "mcc \<sigma> = assn"
|
||||
|
||||
text\<open>An assumption is an explicit or a tacit proposition about the world or a background belief
|
||||
relating to an proposition.\<close>
|
||||
doc_class "assumption" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "assm"
|
||||
invariant d10 :: "mcc \<sigma> = assm"
|
||||
|
||||
text\<open> A working hypothesis is a provisionally accepted hypothesis proposed for further research
|
||||
in a process beginning with an educated guess or thought.
|
||||
|
||||
A different meaning of the term hypothesis is used in formal logic, to denote the antecedent of a
|
||||
proposition; thus in the proposition "If \<open>P\<close>, then \<open>Q\<close>", \<open>P\<close> denotes the hypothesis (or antecedent);
|
||||
\<open>Q\<close> can be called a consequent. \<open>P\<close> is the assumption in a (possibly counterfactual) What If question.\<close>
|
||||
doc_class "hypothesis" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "hypt"
|
||||
invariant d11 :: "mcc \<sigma> = hypt"
|
||||
|
||||
doc_class "math_proof" = math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "prf_stmt"
|
||||
invariant d12 :: "mcc \<sigma> = prf_stmt"
|
||||
|
||||
doc_class "math_example"= math_content +
|
||||
referentiable :: bool <= "True"
|
||||
level :: "int option" <= "Some 2"
|
||||
mcc :: "math_content_class" <= "expl_stmt"
|
||||
invariant d13 :: "mcc \<sigma> = expl_stmt"
|
||||
|
||||
|
||||
subsubsection\<open>Ontological Macros \<^verbatim>\<open>Definition*\<close> , \<^verbatim>\<open>Lemma**\<close>, \<^verbatim>\<open>Theorem*\<close> ... \<close>
|
||||
|
||||
subsection\<open>Support of Command Macros \<^verbatim>\<open>Definition*\<close> , \<^verbatim>\<open>Lemma**\<close>, \<^verbatim>\<open>Theorem*\<close> ... \<close>
|
||||
|
||||
text\<open>These ontological macros allow notations are defined for the class
|
||||
\<^typ>\<open>math_content\<close> in order to allow for a variety of free-form formats;
|
||||
|
@ -279,60 +364,130 @@ text\<open>These ontological macros allow notations are defined for the class
|
|||
supports subsequent abbreviations:
|
||||
|
||||
\<^theory_text>\<open>Definition*[l]\<open>...\<close>\<close>.
|
||||
|
||||
Via the default classes, it is also possible to specify the precise concept
|
||||
that are not necessarily in the same inheritance hierarchy: for example:
|
||||
\<^item> mathematical definition vs terminological setting
|
||||
\<^item> mathematical example vs. technical example.
|
||||
\<^item> mathematical proof vs. some structured argument
|
||||
\<^item> mathematical hypothesis vs. philosophical/metaphysical assumption
|
||||
\<^item> ...
|
||||
|
||||
By setting the default classes, it is possible to reuse these syntactic support and give them
|
||||
different interpretations in an underlying ontological class-tree.
|
||||
|
||||
\<close>
|
||||
|
||||
ML\<open>
|
||||
|
||||
val (Proposition_default_class, Proposition_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Proposition_default_class\<close> (K "");
|
||||
val (Definition_default_class, Definition_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Definition_default_class\<close> (K "");
|
||||
val (Axiom_default_class, Axiom_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Axiom_default_class\<close> (K "");
|
||||
val (Lemma_default_class, Lemma_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Lemma_default_class\<close> (K "");
|
||||
val (Theorem_default_class, Theorem_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Theorem_default_class\<close> (K "");
|
||||
|
||||
val (Corollary_default_class, Corollary_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Corollary_default_class\<close> (K "");
|
||||
val (Assumption_default_class, Assumption_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Assumption_default_class\<close> (K "");
|
||||
val (Assertion_default_class, Assertion_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Assertion_default_class\<close> (K "");
|
||||
val (Consequence_default_class, Consequence_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Consequence_default_class\<close> (K "");
|
||||
val (Conclusion_default_class, Conclusion_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Conclusion_default_class\<close> (K "");
|
||||
val (Premise_default_class, Premise_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Premise_default_class\<close> (K "");
|
||||
val (Hypothesis_default_class, Hypothesis_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Hypothesis_default_class\<close> (K "");
|
||||
val (Proof_default_class, Proof_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Proof_default_class\<close> (K "");
|
||||
val (Example_default_class, Example_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Example_default_class\<close> (K "");
|
||||
val (Remark_default_class, Remark_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Remark_default_class\<close> (K "");
|
||||
val (Notation_default_class, Notation_default_class_setup)
|
||||
= Attrib.config_string \<^binding>\<open>Notation_default_class\<close> (K "");
|
||||
|
||||
\<close>
|
||||
setup\<open>Definition_default_class_setup\<close>
|
||||
setup\<open>Lemma_default_class_setup\<close>
|
||||
setup\<open>Theorem_default_class_setup\<close>
|
||||
|
||||
ML\<open> local open ODL_Meta_Args_Parser in
|
||||
setup\<open> Proposition_default_class_setup
|
||||
#> Definition_default_class_setup
|
||||
#> Axiom_default_class_setup
|
||||
#> Lemma_default_class_setup
|
||||
#> Theorem_default_class_setup
|
||||
#> Corollary_default_class_setup
|
||||
#> Assertion_default_class_setup
|
||||
#> Assumption_default_class_setup
|
||||
#> Premise_default_class_setup
|
||||
#> Hypothesis_default_class_setup
|
||||
#> Consequence_default_class_setup
|
||||
#> Conclusion_default_class_setup
|
||||
#> Proof_default_class_setup
|
||||
#> Remark_default_class_setup
|
||||
#> Example_default_class_setup\<close>
|
||||
|
||||
val _ =
|
||||
Monitor_Command_Parser.document_command \<^command_keyword>\<open>Definition*\<close> "Textual Definition"
|
||||
{markdown = true, body = true}
|
||||
ML\<open>
|
||||
local open ODL_Meta_Args_Parser in
|
||||
|
||||
local
|
||||
fun doc_cmd kwd txt flag key =
|
||||
Monitor_Command_Parser.document_command kwd txt {markdown = true, body = true}
|
||||
(fn meta_args => fn thy =>
|
||||
let
|
||||
val ddc = Config.get_global thy Definition_default_class
|
||||
val use_Definition_default = SOME(((ddc = "") ? (K "math_content")) ddc)
|
||||
val ddc = Config.get_global thy flag
|
||||
val default = SOME(((ddc = "") ? (K \<^const_name>\<open>math_content\<close>)) ddc)
|
||||
in
|
||||
Onto_Macros.enriched_formal_statement_command
|
||||
use_Definition_default [("mcc","defn")] meta_args thy
|
||||
end);
|
||||
Onto_Macros.enriched_formal_statement_command default [("mcc",key)] meta_args thy
|
||||
end)
|
||||
(Onto_Macros.transform_attr [("mcc",key)])
|
||||
|
||||
val _ =
|
||||
Monitor_Command_Parser.document_command \<^command_keyword>\<open>Lemma*\<close> "Textual Lemma Outline"
|
||||
{markdown = true, body = true}
|
||||
(fn meta_args => fn thy =>
|
||||
let
|
||||
val ddc = Config.get_global thy Lemma_default_class
|
||||
val use_Lemma_default = SOME(((ddc = "") ? (K "math_content")) ddc)
|
||||
in
|
||||
Onto_Macros.enriched_formal_statement_command
|
||||
use_Lemma_default [("mcc","lem")] meta_args thy
|
||||
end);
|
||||
in
|
||||
|
||||
val _ =
|
||||
Monitor_Command_Parser.document_command \<^command_keyword>\<open>Theorem*\<close> "Textual Theorem Outline"
|
||||
{markdown = true, body = true}
|
||||
(fn meta_args => fn thy =>
|
||||
let
|
||||
val ddc = Config.get_global thy Theorem_default_class
|
||||
val use_Theorem_default = SOME(((ddc = "") ? (K "math_content")) ddc)
|
||||
in
|
||||
Onto_Macros.enriched_formal_statement_command
|
||||
use_Theorem_default [("mcc","thm")] meta_args thy
|
||||
end);
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Definition*\<close> "Freeform Definition"
|
||||
Definition_default_class "defn";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Lemma*\<close> "Freeform Lemma Description"
|
||||
Lemma_default_class "lemm";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Theorem*\<close> "Freeform Theorem"
|
||||
Theorem_default_class "theom";
|
||||
|
||||
(* cut and paste to make it runnable, but nonsensical so far: *)
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Proposition*\<close> "Freeform Proposition"
|
||||
Proposition_default_class "prpo";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Premise*\<close> "Freeform Premise"
|
||||
Premise_default_class "prms";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Corollary*\<close> "Freeform Corollary"
|
||||
Corollary_default_class "corr";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Consequence*\<close> "Freeform Consequence"
|
||||
Consequence_default_class "cons";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Conclusion*\<close> "Freeform Conclusion"
|
||||
Conclusion_default_class "conc_stmt";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Assumption*\<close> "Freeform Assumption"
|
||||
Assumption_default_class "assm";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Hypothesis*\<close> "Freeform Hypothesis"
|
||||
Hypothesis_default_class "prpo";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Assertion*\<close> "Freeform Assertion"
|
||||
Assertion_default_class "assn";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Proof*\<close> "Freeform Proof"
|
||||
Proof_default_class "prf_stmt";
|
||||
|
||||
val _ = doc_cmd \<^command_keyword>\<open>Example*\<close> "Freeform Example"
|
||||
Example_default_class "expl_stmt";
|
||||
end
|
||||
end
|
||||
\<close>
|
||||
|
||||
|
@ -348,29 +503,14 @@ doc_class math_formal = math_content +
|
|||
properties :: "term list"
|
||||
type_synonym math_fc = math_formal
|
||||
|
||||
doc_class assertion = math_formal +
|
||||
referentiable :: bool <= True (* No support in Backend yet. *)
|
||||
status :: status <= "formal"
|
||||
properties :: "term list"
|
||||
|
||||
|
||||
subsubsection*[ex_ass::example]\<open>Logical Assertions\<close>
|
||||
|
||||
text\<open>Logical assertions allow for logical statements to be checked in the global context). \<close>
|
||||
|
||||
subsubsection*[ex_ass::example]\<open>Example\<close>
|
||||
text\<open>Assertions allow for logical statements to be checked in the global context). \<close>
|
||||
assert*[ass1::assertion, short_name = "\<open>This is an assertion\<close>"] \<open>(3::int) < 4\<close>
|
||||
|
||||
subsection\<open>Example Statements\<close>
|
||||
|
||||
text\<open> \<^verbatim>\<open>examples\<close> are currently considered \<^verbatim>\<open>technical\<close>. Is a main category to be refined
|
||||
via inheritance. \<close>
|
||||
|
||||
doc_class tech_example = technical +
|
||||
referentiable :: bool <= True
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
|
||||
|
||||
|
||||
subsection\<open>Content in Engineering/Tech Papers \<close>
|
||||
text\<open>This section is currently experimental and not supported by the documentation
|
||||
|
@ -379,22 +519,43 @@ text\<open>This section is currently experimental and not supported by the docum
|
|||
doc_class engineering_content = tc +
|
||||
short_name :: string <= "''''"
|
||||
status :: status
|
||||
type_synonym eng_c = engineering_content
|
||||
type_synonym eng_content = engineering_content
|
||||
|
||||
doc_class "experiment" = eng_c +
|
||||
|
||||
doc_class "experiment" = eng_content +
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class "evaluation" = eng_c +
|
||||
doc_class "evaluation" = eng_content +
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class "data" = eng_c +
|
||||
doc_class "data" = eng_content +
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class tech_definition = eng_content +
|
||||
referentiable :: bool <= True
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class tech_code = eng_content +
|
||||
referentiable :: bool <= True
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class tech_example = eng_content +
|
||||
referentiable :: bool <= True
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
doc_class eng_example = eng_content +
|
||||
referentiable :: bool <= True
|
||||
tag :: "string" <= "''''"
|
||||
|
||||
|
||||
subsection\<open>Some Summary\<close>
|
||||
|
||||
print_doc_classes
|
||||
|
||||
print_doc_class_template "definition" (* just a sample *)
|
||||
print_doc_class_template "lemma" (* just a sample *)
|
||||
print_doc_class_template "theorem" (* just a sample *)
|
||||
print_doc_class_template "premise" (* just a sample *)
|
||||
|
||||
|
||||
subsection\<open>Structuring Enforcement in Engineering/Math Papers \<close>
|
||||
|
@ -509,6 +670,7 @@ define_shortcut* eg \<rightleftharpoons> \<open>\eg\<close> (* Latin: „exemp
|
|||
ie \<rightleftharpoons> \<open>\ie\<close> (* Latin: „id est“ meaning „that is to say“. *)
|
||||
etc \<rightleftharpoons> \<open>\etc\<close> (* Latin : „et cetera“ meaning „et cetera“ *)
|
||||
|
||||
print_doc_classes
|
||||
|
||||
end
|
||||
|
||||
|
|
|
@ -98,6 +98,17 @@ object DOF_Document_Build
|
|||
\newcommand{\isadofgenericdoi}{""" + DOF.generic_doi + """}
|
||||
\newcommand{\isabellelatestversion}{""" + DOF.latest_isabelle + """}
|
||||
""")
|
||||
|
||||
|
||||
val texinputs: Path = Path.explode("~~/lib/texinputs")
|
||||
val comment_latex = options.bool("document_comment_latex")
|
||||
if (!comment_latex) {
|
||||
Isabelle_System.copy_file(texinputs + Path.basic("comment.sty"), directory.doc_dir)
|
||||
}
|
||||
|
||||
doc.tags.sty(comment_latex).write(directory.doc_dir)
|
||||
|
||||
|
||||
directory
|
||||
}
|
||||
}
|
||||
|
|
|
@ -103,13 +103,44 @@ fun transform_cid thy NONE X = X
|
|||
end
|
||||
in
|
||||
|
||||
fun enriched_formal_statement_command ncid (S: (string * string) list) =
|
||||
let fun transform_attr doc_attrs = (map (fn(cat,tag) => ((cat,@{here}),tag)) S) @
|
||||
(("formal_results",@{here}),"([]::thm list)")::doc_attrs
|
||||
in fn margs => fn thy =>
|
||||
fun transform_attr S cid_long thy doc_attrs =
|
||||
let
|
||||
fun transform_attr' [] doc_attrs = doc_attrs
|
||||
| transform_attr' (s::S) (doc_attrs) =
|
||||
let fun markup2string s = s |> YXML.content_of
|
||||
|> Symbol.explode
|
||||
|> List.filter (fn c => c <> Symbol.DEL)
|
||||
|> String.concat
|
||||
fun toLong n = DOF_core.get_attribute_info cid_long (markup2string n) thy
|
||||
|> the |> #long_name
|
||||
val (name', value) = s |> (fn (n, v) => (toLong n, v))
|
||||
val doc_attrs' = doc_attrs
|
||||
|> map (fn (name, term) => if name = name'
|
||||
then (name, value)
|
||||
else (name, term))
|
||||
in if doc_attrs' = doc_attrs
|
||||
then transform_attr' S doc_attrs' |> cons (name', value)
|
||||
else transform_attr' S doc_attrs'
|
||||
end
|
||||
in transform_attr' S doc_attrs end
|
||||
|
||||
(*fun update_meta_args_attrs S
|
||||
((((oid, pos),cid_pos), doc_attrs) : ODL_Meta_Args_Parser.meta_args_t) =
|
||||
(((oid, pos),cid_pos), transform_attr S doc_attrs)
|
||||
*)
|
||||
(*fun enriched_formal_statement_command ncid (S: (string * string) list) =
|
||||
fn margs => fn thy =>
|
||||
Monitor_Command_Parser.gen_enriched_document_cmd {inline=true}
|
||||
(transform_cid thy ncid) transform_attr margs thy
|
||||
end;
|
||||
(transform_cid thy ncid) (transform_attr S) margs thy
|
||||
*)
|
||||
|
||||
fun enriched_formal_statement_command ncid (S: (string * string) list) =
|
||||
let fun transform_attr doc_attrs = (map (fn(cat,tag) => ((cat,@{here}),tag)) S) @
|
||||
(("formal_results",@{here}),"([]::thm list)")::doc_attrs
|
||||
in fn margs => fn thy =>
|
||||
Monitor_Command_Parser.gen_enriched_document_cmd {inline=true}
|
||||
(transform_cid thy ncid) transform_attr margs thy
|
||||
end;
|
||||
|
||||
fun enriched_document_cmd_exp ncid (S: (string * string) list) =
|
||||
(* expands ncid into supertype-check. *)
|
||||
|
@ -123,7 +154,7 @@ end (* local *)
|
|||
|
||||
fun heading_command (name, pos) descr level =
|
||||
Monitor_Command_Parser.document_command (name, pos) descr
|
||||
{markdown = false, body = true} (enriched_text_element_cmd level);
|
||||
{markdown = false, body = true} (enriched_text_element_cmd level) (K(K I));
|
||||
|
||||
val _ = heading_command \<^command_keyword>\<open>title*\<close> "section heading" NONE;
|
||||
val _ = heading_command \<^command_keyword>\<open>subtitle*\<close> "section heading" NONE;
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019 The University of Exeter
|
||||
* 2018-2019 The University of Paris-Saclay
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
|
@ -13,7 +13,7 @@
|
|||
|
||||
(*<*)
|
||||
theory "Isabelle_DOF-Manual"
|
||||
imports "05_Implementation"
|
||||
imports "M_05_Implementation"
|
||||
begin
|
||||
close_monitor*[this]
|
||||
check_doc_global
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
*************************************************************************)
|
||||
|
||||
(*<*)
|
||||
theory "00_Frontmatter"
|
||||
theory "M_00_Frontmatter"
|
||||
imports
|
||||
"Isabelle_DOF.technical_report"
|
||||
begin
|
||||
|
@ -20,8 +20,6 @@ begin
|
|||
use_template "scrreprt-modern"
|
||||
use_ontology "technical_report"
|
||||
|
||||
|
||||
|
||||
section\<open>Local Document Setup.\<close>
|
||||
text\<open>Introducing document specific abbreviations and macros:\<close>
|
||||
|
||||
|
@ -32,6 +30,7 @@ define_shortcut* TeXLive \<rightleftharpoons> \<open>\TeXLive\<close>
|
|||
BibTeX \<rightleftharpoons> \<open>\BibTeX{}\<close>
|
||||
LaTeX \<rightleftharpoons> \<open>\LaTeX{}\<close>
|
||||
TeX \<rightleftharpoons> \<open>\TeX{}\<close>
|
||||
dofurl \<rightleftharpoons> \<open>\dofurl\<close>
|
||||
pdf \<rightleftharpoons> \<open>PDF\<close>
|
||||
|
||||
text\<open>Note that these setups assume that the associated \<^LaTeX> macros
|
||||
|
@ -109,12 +108,15 @@ author*[ adb,
|
|||
orcid ="\<open>0000-0002-6355-1200\<close>",
|
||||
http_site ="\<open>https://www.brucker.ch/\<close>",
|
||||
affiliation ="\<open>University of Exeter, Exeter, UK\<close>"]\<open>Achim D. Brucker\<close>
|
||||
author*[ nico,
|
||||
email = "\<open>nicolas.meric@lri.fr\<close>",
|
||||
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Nicolas Méric\<close>
|
||||
author*[ bu,
|
||||
email = "\<open>wolff@lri.fr\<close>",
|
||||
affiliation = "\<open>Université Paris-Saclay, LRI, Paris, France\<close>"]\<open>Burkhart Wolff\<close>
|
||||
|
||||
abstract*[abs, keywordlist="[\<open>Ontology\<close>, \<open>Ontological Modeling\<close>, \<open>Document Management\<close>,
|
||||
\<open>Formal Document Development\<close>,\<open>Document Authoring\<close>,\<open>Isabelle/DOF\<close>]"]
|
||||
\<open>Formal Document Development\<close>,\<open>Isabelle/DOF\<close>]"]
|
||||
\<open> \<^isadof> provides an implementation of \<^dof> on top of Isabelle/HOL.
|
||||
\<^dof> itself is a novel framework for \<^emph>\<open>defining\<close> ontologies
|
||||
and \<^emph>\<open>enforcing\<close> them during document development and document
|
|
@ -1,7 +1,7 @@
|
|||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019-2021 The University of Exeter
|
||||
* 2018-2021 The University of Paris-Saclay
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
|
@ -12,8 +12,8 @@
|
|||
*************************************************************************)
|
||||
|
||||
(*<*)
|
||||
theory "01_Introduction"
|
||||
imports "00_Frontmatter"
|
||||
theory "M_01_Introduction"
|
||||
imports "M_00_Frontmatter"
|
||||
begin
|
||||
(*>*)
|
||||
|
||||
|
@ -28,8 +28,8 @@ form of the structure of documents as well as the document discourse.
|
|||
|
||||
Such ontologies can be used for the scientific discourse within scholarly articles, mathematical
|
||||
libraries, and in the engineering discourse of standardized software certification
|
||||
documents~@{cite "boulanger:cenelec-50128:2015" and "cc:cc-part3:2006"}: certification documents
|
||||
have to follow a structure. In practice, large groups of developers have to produce a substantial
|
||||
documents~@{cite "boulanger:cenelec-50128:2015" and "cc:cc-part3:2006"}. All these
|
||||
documents contain formal content and have to follow a given structure. In practice, large groups of developers have to produce a substantial
|
||||
set of documents where the consistency is notoriously difficult to maintain. In particular,
|
||||
certifications are centered around the \<^emph>\<open>traceability\<close> of requirements throughout the entire
|
||||
set of documents. While technical solutions for the traceability problem exists (most notably:
|
||||
|
@ -38,32 +38,41 @@ and their logical contexts).
|
|||
|
||||
Further applications are the domain-specific discourse in juridical texts or medical reports.
|
||||
In general, an ontology is a formal explicit description of \<^emph>\<open>concepts\<close> in a domain of discourse
|
||||
(called \<^emph>\<open>classes\<close>), properties of each concept describing \<^emph>\<open>attributes\<close> of the concept, as well
|
||||
as \<^emph>\<open>links\<close> between them. A particular link between concepts is the \<^emph>\<open>is-a\<close> relation declaring
|
||||
(called \<^emph>\<open>classes\<close>), components (called \<^emph>\<open>attributes\<close>) of the concept, and properties (called
|
||||
\<^emph>\<open>invariants\<close>) on concepts. Logically, classes are represented by a type (the class type) and
|
||||
particular terms representing \<^emph>\<open>instances\<close> of them. Since components are typed, it is therefore
|
||||
possible to express \<^emph>\<open>links\<close> like \<open>m\<close>-to-\<open>n\<close> relations between classes.
|
||||
Another form of link between concepts is the \<^emph>\<open>is-a\<close> relation declaring
|
||||
the instances of a subclass to be instances of the super-class.
|
||||
|
||||
To address this challenge, we present the Document Ontology Framework (\<^dof>) and an
|
||||
implementation of \<^dof> called \<^isadof>. \<^dof> is designed for building scalable and user-friendly
|
||||
tools on top of interactive theorem provers. \<^isadof> is an instance of this novel framework,
|
||||
implemented as extension of Isabelle/HOL, to \<^emph>\<open>model\<close> typed ontologies and to \<^emph>\<open>enforce\<close> them
|
||||
during document evolution. Based on Isabelle's infrastructures, ontologies may refer to types,
|
||||
terms, proven theorems, code, or established assertions. Based on a novel adaption of the Isabelle
|
||||
IDE (called PIDE, @{cite "wenzel:asynchronous:2014"}), a document is checked to be
|
||||
\<^emph>\<open>conform\<close> to a particular ontology---\<^isadof> is designed to give fast user-feedback \<^emph>\<open>during the
|
||||
capture of content\<close>. This is particularly valuable in case of document evolution, where the
|
||||
\<^emph>\<open>coherence\<close> between the formal and the informal parts of the content can be mechanically checked.
|
||||
Engineering an ontological language for documents that contain both formal and informal elements
|
||||
as occuring in formal theories is a particular challenge. To address this latter, we present
|
||||
the Document Ontology Framework (\<^dof>) and an implementation of \<^dof> called \<^isadof>.
|
||||
\<^dof> is designed for building scalable and user-friendly tools on top of interactive theorem
|
||||
provers. \<^isadof> is an instance of this novel framework, implemented as extension of Isabelle/HOL,
|
||||
to \<^emph>\<open>model\<close> typed ontologies and to \<^emph>\<open>enforce\<close> them during document evolution. Based on Isabelle's
|
||||
infrastructures, ontologies may refer to types, terms, proven theorems, code, or established
|
||||
assertions. Based on a novel adaption of the Isabelle IDE (called PIDE, @{cite "wenzel:asynchronous:2014"}),
|
||||
a document is checked to be \<^emph>\<open>conform\<close> to a particular ontology---\<^isadof> is designed to give fast
|
||||
user-feedback \<^emph>\<open>during the capture of content\<close>. This is particularly valuable in case of document
|
||||
evolution, where the \<^emph>\<open>coherence\<close> between the formal and the informal parts of the content can
|
||||
be mechanically checked.
|
||||
|
||||
To avoid any misunderstanding: \<^isadof> is \<^emph>\<open>not a theory in HOL\<close> on ontologies and operations to
|
||||
track and trace links in texts, it is an \<^emph>\<open>environment to write structured text\<close> which
|
||||
\<^emph>\<open>may contain\<close> Isabelle/HOL definitions and proofs like mathematical articles, tech-reports and
|
||||
scientific papers---as the present one, which is written in \<^isadof> itself. \<^isadof> is a plugin
|
||||
into the Isabelle/Isar framework in the style of~@{cite "wenzel.ea:building:2007"}.\<close>
|
||||
into the Isabelle/Isar framework in the style of~@{cite "wenzel.ea:building:2007"}. However,
|
||||
\<^isadof> will generate from ontologies a theory infrastructure consisting of types, terms, theorems
|
||||
and code that allows both interactive checking as well as formal reasoning over meta-data
|
||||
related to annotated documents.\<close>
|
||||
|
||||
subsubsection\<open>How to Read This Manual\<close>
|
||||
(*<*)
|
||||
declare_reference*[background::text_section]
|
||||
declare_reference*[isadof_tour::text_section]
|
||||
declare_reference*[isadof_ontologies::text_section]
|
||||
declare_reference*[writing_doc::text_section]
|
||||
declare_reference*[isadof_developers::text_section]
|
||||
(*>*)
|
||||
text\<open>
|
||||
|
@ -91,9 +100,9 @@ text\<open>
|
|||
by simp\<close>}
|
||||
\<^item> a green background for examples of generated document fragments (\<^ie>, PDF output):
|
||||
@{boxed_pdf [display] \<open>The axiom refl\<close>}
|
||||
\<^item> a red background for (S)ML-code:
|
||||
\<^item> a red background for SML-code:
|
||||
@{boxed_sml [display] \<open>fun id x = x\<close>}
|
||||
\<^item> a yellow background for \LaTeX-code:
|
||||
\<^item> a yellow background for \<^LaTeX>-code:
|
||||
@{boxed_latex [display] \<open>\newcommand{\refl}{$x = x$}\<close>}
|
||||
\<^item> a grey background for shell scripts and interactive shell sessions:
|
||||
@{boxed_bash [display]\<open>ë\prompt{}ë ls
|
||||
|
@ -104,6 +113,15 @@ subsubsection\<open>How to Cite \<^isadof>\<close>
|
|||
text\<open>
|
||||
If you use or extend \<^isadof> in your publications, please use
|
||||
\<^item> for the \<^isadof> system~@{cite "brucker.ea:isabelle-ontologies:2018"}:
|
||||
\begin{quote}\small
|
||||
A.~D. Brucker, I.~Ait-Sadoune, N. Méric, and B.~Wolff. Using Deep Ontologies in Formal
|
||||
Software Engineering. In \<^emph>\<open>International Conference on Rigorous State-Based Methods (ABZ 2023)\<close>,
|
||||
To appear in Lecture Notes in Computer Science. Springer-Verlag,
|
||||
Heidelberg, 2023. \href{https://doi.org/???} {10.1007/????????}.
|
||||
\end{quote}
|
||||
A \<^BibTeX>-entry is available at:
|
||||
\<^url>\<open>https://www.lri.fr/~wolff/bibtex/wolff.html\<close>.
|
||||
\<^item> an older description of the system~@{cite "brucker.ea:isabelle-ontologies:2018"}:
|
||||
\begin{quote}\small
|
||||
A.~D. Brucker, I.~Ait-Sadoune, P.~Crisafulli, and B.~Wolff. Using the {Isabelle} ontology
|
||||
framework: Linking the formal with the informal. In \<^emph>\<open>Conference on Intelligent Computer
|
||||
|
@ -128,13 +146,13 @@ text\<open>
|
|||
Using Ontologies in Formal Developments Targeting Certification.
|
||||
In W. Ahrendt and S. Tarifa, editors. \<^emph>\<open>Integrated Formal Methods (IFM)\<close>, number 11918.
|
||||
Lecture Notes in Computer Science. Springer-Verlag, Heidelberg, 2019.
|
||||
\href{https://doi.org/10.1007/978-3-030-34968-4\_4}.
|
||||
\<^url>\<open>https://doi.org/10.1007/978-3-030-34968-4_4\<close>.
|
||||
\end{quote}
|
||||
\<close>
|
||||
subsubsection\<open>Availability\<close>
|
||||
text\<open>
|
||||
The implementation of the framework is available at
|
||||
\url{\dofurl}. The website also provides links to the latest releases. \<^isadof> is licensed
|
||||
\url{\<^dofurl>}. The website also provides links to the latest releases. \<^isadof> is licensed
|
||||
under a 2-clause BSD license (SPDX-License-Identifier: BSD-2-Clause).
|
||||
\<close>
|
||||
|
|
@ -1,7 +1,7 @@
|
|||
(*************************************************************************
|
||||
* Copyright (C)
|
||||
* 2019-2022 The University of Exeter
|
||||
* 2018-2022 The University of Paris-Saclay
|
||||
* 2019-2023 The University of Exeter
|
||||
* 2018-2023 The University of Paris-Saclay
|
||||
* 2018 The University of Sheffield
|
||||
*
|
||||
* License:
|
||||
|
@ -12,8 +12,8 @@
|
|||
*************************************************************************)
|
||||
|
||||
(*<*)
|
||||
theory "02_Background"
|
||||
imports "01_Introduction"
|
||||
theory "M_02_Background"
|
||||
imports "M_01_Introduction"
|
||||
begin
|
||||
(*>*)
|
||||
|
||||
|
@ -30,8 +30,8 @@ While Isabelle is widely perceived as an interactive theorem
|
|||
prover for HOL (Higher-order Logic)~@{cite "nipkow.ea:isabelle:2002"}, we would like to emphasize
|
||||
the view that Isabelle is far more than that: it is the \<^emph>\<open>Eclipse of Formal Methods Tools\<close>. This
|
||||
refers to the ``\<^emph>\<open>generic system framework of Isabelle/Isar underlying recent versions of Isabelle.
|
||||
Among other things, Isar provides an infrastructure for Isabelle plug-ins, comprising extensible
|
||||
state components and extensible syntax that can be bound to ML programs. Thus, the Isabelle/Isar
|
||||
Among other things, Isabelle provides an infrastructure for Isabelle plug-ins, comprising extensible
|
||||
state components and extensible syntax that can be bound to SML programs. Thus, the Isabelle
|
||||
architecture may be understood as an extension and refinement of the traditional `LCF approach',
|
||||
with explicit infrastructure for building derivative systems.\<close>''~@{cite "wenzel.ea:building:2007"}
|
||||
|
||||
|
@ -49,41 +49,47 @@ The Isabelle system architecture shown in @{docitem \<open>architecture\<close>}
|
|||
with Standard ML (SML) at the bottom layer as implementation language. The architecture actually
|
||||
foresees a \<^emph>\<open>Nano-Kernel\<close> (our terminology) which resides in the SML structure \<^boxed_sml>\<open>Context\<close>.
|
||||
This structure provides a kind of container called \<^emph>\<open>context\<close> providing an identity, an
|
||||
ancestor-list as well as typed, user-defined state for components (plugins) such as \<^isadof>.
|
||||
ancestor-list as well as typed, user-defined state for plugins such as \<^isadof>.
|
||||
On top of the latter, the LCF-Kernel, tactics, automated proof procedures as well as specific
|
||||
support for higher specification constructs were built.\<close>
|
||||
support for higher specification constructs were built.
|
||||
\<^footnote>\<open>We use the term \<^emph>\<open>plugin\<close> for a collection of HOL-definitions, SML and Scala code in order
|
||||
to distinguish it from the official Isabelle term \<^emph>\<open>component\<close> which implies a particular
|
||||
format and support by the Isabelle build system.\<close>
|
||||
\<close>
|
||||
|
||||
section*[dof::introduction]\<open>The Document Model Required by \<^dof>\<close>
|
||||
text\<open>
|
||||
In this section, we explain the assumed document model underlying our Document Ontology Framework
|
||||
(\<^dof>) in general. In particular we discuss the concepts
|
||||
\<^emph>\<open>integrated document\<close>\<^bindex>\<open>integrated document\<close>, \<^emph>\<open>sub-document\<close>\<^bindex>\<open>sub-document\<close>,
|
||||
\<^emph>\<open>text-element\<close>\<^bindex>\<open>text-element\<close>, and \<^emph>\<open>semantic macros\<close>\<^bindex>\<open>semantic macros\<close> occurring
|
||||
inside text-elements. Furthermore, we assume two different levels of parsers
|
||||
(for \<^emph>\<open>outer\<close> and \<^emph>\<open>inner syntax\<close>) where the inner-syntax is basically a typed \<open>\<lambda>\<close>-calculus
|
||||
and some Higher-order Logic (HOL)\<^bindex>\<open>HOL\<close>.
|
||||
\<close>
|
||||
\<^emph>\<open>document-element\<close>\<^bindex>\<open>document-element\<close>, and \<^emph>\<open>semantic macros\<close>\<^bindex>\<open>semantic macros\<close> occurring
|
||||
inside document-elements. This type of document structure is quite common for scripts interactively
|
||||
evaluated in an incremental fashion.
|
||||
Furthermore, we assume a bracketing mechanism that unambiguously allows to separate different
|
||||
syntactic fragments and that can be nested. In the case of Isabelle, these are the guillemot
|
||||
symbols \<open>\<open>...\<close>\<close>, which represent the begin and end of a \<^emph>\<open>cartouche\<close>\<^bindex>\<open>cartouche\<close>.\<close>
|
||||
|
||||
|
||||
(*<*)
|
||||
declare_reference*["fig:dependency"::figure]
|
||||
(* declare_reference*["fig:dependency"::figure]
|
||||
@{figure (unchecked) "fig:dependency"} *)
|
||||
declare_reference*[docModGenConcr::figure]
|
||||
(*>*)
|
||||
|
||||
|
||||
text\<open>
|
||||
The Isabelle Framework is based on a \<^emph>\<open>document-centric view\<close>\<^bindex>\<open>document-centric view\<close> of
|
||||
a document, treating the input in its integrality as set of (user-programmable) \<^emph>\<open>document element\<close>
|
||||
that may mutually depend on and link to each other; A \<^emph>\<open>document\<close> in our sense is what is configured in a set of
|
||||
\<^verbatim>\<open>ROOT\<close>- and \<^verbatim>\<open>ROOTS\<close>-files.
|
||||
that may mutually depend on and link to each other; A \<^emph>\<open>document\<close> in our sense is what is configured
|
||||
in a set of \<^verbatim>\<open>ROOT\<close>- and \<^verbatim>\<open>ROOTS\<close>-files.
|
||||
|
||||
Isabelle assumes a hierarchical document model\<^index>\<open>document model\<close>, \<^ie>, an \<^emph>\<open>integrated\<close> document
|
||||
consist of a hierarchy of \<^emph>\<open>sub-documents\<close> (files); dependencies are restricted to be
|
||||
acyclic at this level.
|
||||
Sub-documents can have different document types in order to capture documentations consisting of
|
||||
acyclic at this level (c.f. @{figure (unchecked) "docModGenConcr"}).
|
||||
Document parts can have different document types in order to capture documentations consisting of
|
||||
documentation, models, proofs, code of various forms and other technical artifacts. We call the
|
||||
main sub-document type, for historical reasons, \<^emph>\<open>theory\<close>-files. A theory file\<^bindex>\<open>theory!file\<close>
|
||||
consists of a \<^emph>\<open>header\<close>\<^bindex>\<open>header\<close>, a \<^emph>\<open>context definition\<close>\<^index>\<open>context\<close>, and a body
|
||||
consisting of a sequence of document elements called
|
||||
\<^emph>\<open>command\<close>s (see @{figure (unchecked) "fig:dependency"}). Even
|
||||
\<^emph>\<open>command\<close>s (see @{figure (unchecked) "docModGenConcr"}(left)). Even
|
||||
the header consists of a sequence of commands used for introductory text elements not depending on
|
||||
any context. The context-definition contains an \<^boxed_theory_text>\<open>import\<close> and a
|
||||
\<^boxed_theory_text>\<open>keyword\<close> section, for example:
|
||||
|
@ -98,19 +104,26 @@ text\<open>
|
|||
\<^boxed_theory_text>\<open>keywords\<close> are used to separate commands from each other.
|
||||
\<close>
|
||||
|
||||
text\<open> A text-element \<^index>\<open>text-element\<close> may look like this:
|
||||
side_by_side_figure*[docModGenConcr::side_by_side_figure,anchor="''docModGen''",
|
||||
caption="''Schematic Representation.''",relative_width="45",
|
||||
src="''figures/doc-mod-generic.png''",anchor2="''docModIsar''",
|
||||
caption2="''The Isar Instance.''",relative_width2="45",
|
||||
src2="''figures/doc-mod-isar.png''"]\<open>A Representation of a Document Model.\<close>
|
||||
|
||||
text\<open>The body of a theory file consists of a sequence of \<^emph>\<open>commands\<close> that must be introduced
|
||||
by a command keyword such as \<^boxed_theory_text>\<open>requirement\<close> above. Command keywords may mark
|
||||
the the begin of a text that is parsed by a command-specific parser; the end of the
|
||||
command-span is defined by the next keyword. Commands were used to define definitions, lemmas,
|
||||
code and text-elements (see @{figure "docModGenConcr"}(right)). \<close>
|
||||
|
||||
text\<open> A simple text-element \<^index>\<open>text-element\<close> may look like this:
|
||||
|
||||
@{boxed_theory_text [display]\<open>
|
||||
text\<open> According to the \<^emph>\<open>reflexivity\<close> axiom @{thm refl},
|
||||
we obtain in \<Gamma> for @{term "fac 5"} the result @{value "fac 5"}.\<close>\<close>}
|
||||
text\<open> This is a simple text.\<close>\<close>}
|
||||
... so it is a command \<^theory_text>\<open>text\<close> followed by an argument (here in \<open>\<open> ... \<close>\<close> parenthesis) which
|
||||
contains characters and a special notation for semantic macros \<^bindex>\<open>semantic macros\<close>
|
||||
(here \<^theory_text>\<open>@{term "fac 5"}).\<close>
|
||||
\<close>
|
||||
|
||||
text\<open>While we concentrate in this manual on \<^theory_text>\<open>text\<close>-document elements --- this is the main
|
||||
use of \<^dof> in its current stage --- it is important to note that there are actually three
|
||||
families of ``ontology aware'' document elements with analogous
|
||||
contains characters. While \<^theory_text>\<open>text\<close>- elements play a major role in this manual --- document
|
||||
generation is the main use-case of \<^dof> in its current stage --- it is important to note that there
|
||||
are actually three families of ``ontology aware'' document elements with analogous
|
||||
syntax to standard ones. The difference is a bracket with meta-data of the form:
|
||||
@{theory_text [display,indent=5, margin=70]
|
||||
\<open>
|
||||
|
@ -119,49 +132,73 @@ ML*[label::classid, attr\<^sub>1=E\<^sub>1, ... attr\<^sub>n=E\<^sub>n]\<open> s
|
|||
value*[label::classid, attr\<^sub>1=E\<^sub>1, ... attr\<^sub>n=E\<^sub>n]\<open> some annotated \<lambda>-term \<close>
|
||||
\<close>}
|
||||
|
||||
Depending on the family, we will speak about \<^emph>\<open>(formal) text-contexts\<close>,\<^index>\<open>formal text-contexts\<close>
|
||||
\<^emph>\<open>(ML) code-contexts\<close>\<^index>\<open>code-contexts\<close> and \<^emph>\<open>term-contexts\<close>\<^index>\<open>term-contexts\<close> if we refer
|
||||
to sub-elements inside the \<open>\<open>...\<close>\<close> cartouches of these command families. Note that the Isabelle
|
||||
framework allows for nesting cartouches that permits to support switching into a different
|
||||
context. In general, this has also the effect that the evaluation of antiquotations changes.
|
||||
Other instances of document elements belonging to these families are, for example, the freeform
|
||||
\<^theory_text>\<open>Definition*\<close> and \<^theory_text>\<open>Lemma*\<close> as well as their formal counterparts \<^theory_text>\<open>definition*\<close> and \<^theory_text>\<open>lemma*\<close>,
|
||||
which allow in addition to their standard Isabelle functionality the creation and management of
|
||||
ontology-generated meta-data associated to them (cf. -@{text_section (unchecked) "writing_doc"}).
|
||||
|
||||
Depending on the family, we will speak about \<^emph>\<open>(formal) text-contexts\<close>,\<^bindex>\<open>formal text-contexts\<close>
|
||||
\<^emph>\<open>(ML) code-contexts\<close>\<^bindex>\<open>code-contexts\<close> and \<^emph>\<open>term-contexts\<close>\<^bindex>\<open>term-contexts\<close> if we refer
|
||||
to sub-elements inside the \<open>\<open>...\<close>\<close> cartouches of these command families.
|
||||
|
||||
Text- code- or term contexts may contain a special form comment, that may be considered as a
|
||||
"semantic macro" or a machine-checked annotation: the so-called antiquotations\<^bindex>\<open>antiquotation\<close>.
|
||||
Its Its general syntactic format reads as follows:
|
||||
|
||||
@{boxed_theory_text [display]\<open> @{antiquotation_name (args) [more_args] \<open>sub-context\<close> }\<close>}
|
||||
|
||||
The sub-context may be different from the surrounding one; therefore, it is possible
|
||||
to switch from a text-context to a term-context, for example. Therefore, antiquotations allow
|
||||
the nesting of cartouches, albeit not all combinations are actually supported.
|
||||
\<^footnote>\<open>In the literature, this concept has been referred to \<open>Cascade-Syntax\<close> and was used in the
|
||||
Centaur-system and is existing in some limited form in some Emacs-implementations these days. \<close>
|
||||
\<close>
|
||||
text\<open>
|
||||
On the semantic level, we assume a validation process for an integrated document, where the
|
||||
semantics of a command is a transformation \<open>\<theta> \<rightarrow> \<theta>\<close> for some system state \<open>\<theta>\<close>.
|
||||
This document model can be instantiated depending on the text-code-, or term-contexts.
|
||||
For common text elements, \<^eg>, \<^theory_text>\<open>section\<open>...\<close>\<close> or \<^theory_text>\<open>text\<open>...\<close>\<close>,
|
||||
users can add informal text to a sub-document using a text command:
|
||||
@{boxed_theory_text [display] \<open>text\<open>This is a description.\<close>\<close> }
|
||||
This will type-set the corresponding text in, for example, a PDF document. However, this
|
||||
translation is not necessarily one-to-one: text elements can be enriched by formal, \<^ie>,
|
||||
machine-checked content via \<^emph>\<open>semantic macros\<close>, called antiquotations\<^bindex>\<open>antiquotation\<close>:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>text\<open> According to the \<^emph>\<open>reflexivity\<close> axiom @{thm "refl"}, we obtain in \<Gamma>
|
||||
for @{term \<open>fac 5\<close>} the result @{value \<open>fac 5\<close>}.\<close>\<close>
|
||||
}
|
||||
which is represented in the final document (\<^eg>, a PDF) by:
|
||||
Centaur-system and is existing in some limited form in some Emacs-implementations these days. \<close>
|
||||
Isabelle comes with a number of built-in antiquotations for text- and code-contexts;
|
||||
a detailed overview can be found in @{cite "wenzel:isabelle-isar:2020"}. \<^dof> reuses this general
|
||||
infrastructure but \<^emph>\<open>generates\<close> its own families of antiquotations from ontologies.\<close>
|
||||
|
||||
text\<open> An example for a text-element \<^index>\<open>text-element\<close> using built-in antiquotations
|
||||
may look like this:
|
||||
|
||||
@{boxed_theory_text [display]\<open>
|
||||
text\<open> According to the \<^emph>\<open>reflexivity\<close> axiom @{thm refl},
|
||||
we obtain in \<Gamma> for @{term "fac 5"} the result @{value "fac 5"}.\<close>\<close>}
|
||||
... so it is a command \<^theory_text>\<open>text\<close> followed by an argument (here in \<open>\<open> ... \<close>\<close> parenthesis) which
|
||||
contains characters and a special notation for semantic macros \<^bindex>\<open>semantic macros\<close>
|
||||
(here \<^theory_text>\<open>@{term "fac 5"}\<close>).
|
||||
|
||||
The above text element is represented in the final document (\<^eg>, a PDF) by:
|
||||
@{boxed_pdf [display]
|
||||
\<open>According to the $\emph{reflexivity}$ axiom $\mathrm{x = x}$, we obtain in $\Gamma$
|
||||
for $\operatorname{fac} \text{\textrm{5}}$ the result $\text{\textrm{120}}$.\<close>
|
||||
}
|
||||
}\<close>
|
||||
|
||||
Semantic macros are partial functions of type \<open>\<theta> \<rightarrow> text\<close>; since they can use the
|
||||
system state, they can perform all sorts of specific checks or evaluations (type-checks,
|
||||
executions of code-elements, references to text-elements or proven theorems such as
|
||||
text\<open>Antiquotations seen as semantic macros are partial functions of type \<open>logical_context \<rightarrow> text\<close>;
|
||||
since they can use the system state, they can perform all sorts of specific checks or evaluations
|
||||
(type-checks, executions of code-elements, references to text-elements or proven theorems such as
|
||||
\<open>refl\<close>, which is the reference to the axiom of reflexivity).
|
||||
|
||||
Semantic macros establish \<^emph>\<open>formal content\<close> inside informal content; they can be
|
||||
Therefore, semantic macros can establish \<^emph>\<open>formal content\<close> inside informal content; they can be
|
||||
type-checked before being displayed and can be used for calculations before being
|
||||
typeset. They represent the device for linking the formal with the informal.
|
||||
typeset. They represent the device for linking formal with the informal content.
|
||||
\<close>
|
||||
|
||||
side_by_side_figure*[docModOnto::side_by_side_figure,anchor="''docModGen''",
|
||||
caption="''A Document with Ontological Annotations.''",relative_width="47",
|
||||
src="''figures/doc-mod-DOF.png''",anchor2="''docModDOF''",
|
||||
caption2="''Ontological References.''",relative_width2="47",
|
||||
src2="''figures/doc-mod-onto-docinst.png''"]\<open>Documents conform to Ontologies.\<close>
|
||||
|
||||
text\<open>Since Isabelle's commands are freely programmable, it is possible to implement \<^dof> as an
|
||||
extension of the system. In particular, the ontology language of \<^dof> provides an ontology
|
||||
definition language ODL that \<^emph>\<open>generates\<close> anti-quotations and the infrastructure to check and evaluate
|
||||
them. This allows for checking an annotated document with respect to a given ontology, which may be
|
||||
specific for a given domain-specific universe of discourse (see @{figure "docModOnto"}). ODL will
|
||||
be described in @{text_section (unchecked) "isadof_tour"} in more detail.\<close>
|
||||
|
||||
(*
|
||||
figure*["fig:dependency"::figure,relative_width="70",src="''figures/document-hierarchy''"]
|
||||
\<open>A Theory-Graph in the Document Model. \<close>
|
||||
|
||||
*)
|
||||
section*[bgrnd21::introduction]\<open>Implementability of the Document Model in other ITP's\<close>
|
||||
text\<open>
|
||||
Batch-mode checkers for \<^dof> can be implemented in all systems of the LCF-style prover family,
|
||||
|
@ -171,9 +208,7 @@ text\<open>
|
|||
to benefit from this experience, only Isabelle and Coq have the necessary infrastructure of
|
||||
asynchronous proof-processing and support by a PIDE~@{cite "wenzel:asynchronous:2014" and
|
||||
"wenzel:system:2014" and "barras.ea:pervasive:2013" and "faithfull.ea:coqoon:2018"} which
|
||||
in many features over-accomplishes the required features of \<^dof>. For example, current Isabelle
|
||||
versions offer cascade-syntaxes (different syntaxes and even parser-technologies which can be
|
||||
nested along the \<open>\<open>...\<close>\<close> barriers), while \<^dof> actually only requires a two-level syntax model.
|
||||
in many features over-accomplishes the required features of \<^dof>.
|
||||
\<close>
|
||||
|
||||
figure*["fig:dof-ide"::figure,relative_width="95",src="''figures/cicm2018-combined''"]\<open>
|
|
@ -13,9 +13,9 @@
|
|||
|
||||
(*<*)
|
||||
theory
|
||||
"03_GuidedTour"
|
||||
"M_03_GuidedTour"
|
||||
imports
|
||||
"02_Background"
|
||||
"M_02_Background"
|
||||
begin
|
||||
(*>*)
|
||||
|
||||
|
@ -74,9 +74,13 @@ is currently consisting out of two AFP entries:
|
|||
|
||||
\<^item> Isabelle\_DOF: This entry
|
||||
contains the Isabelle/DOF system itself, including the Isabelle/DOF manual.
|
||||
\<^item> Isabelle\_DOF-Example-Scholarly\_Paper: This entry contains an example of
|
||||
an academic paper written using the Isabelle/DOF system.
|
||||
\<close>
|
||||
\<^item> Isabelle\_DOF-Example-I: This entry contains an example of
|
||||
an academic paper written using the Isabelle/DOF system oriented towards
|
||||
\<^item> Isabelle\_DOF-Example-II: This entry contains another example of
|
||||
a mathematics-oriented academic paper.
|
||||
|
||||
It is recommended to follow the structure of one of these papers.\<close>
|
||||
|
||||
|
||||
section*[writing_doc::technical]\<open>Writing Documents\<close>
|
||||
|
||||
|
@ -87,9 +91,10 @@ not make use of a file called \<^verbatim>\<open>document/root.tex\<close>. Inst
|
|||
ontology representations is done within theory files. To make use of this feature, one needs
|
||||
to add the option \<^verbatim>\<open>document_build = dof\<close> to the \<^verbatim>\<open>ROOT\<close> file.
|
||||
An example \<^verbatim>\<open>ROOT\<close> file looks as follows:
|
||||
|
||||
\<close>
|
||||
text\<open>
|
||||
\begin{config}{ROOT}
|
||||
session example = HOL +
|
||||
session example = Isabelle_DOF +
|
||||
options [document = pdf, document_output = "output", document_build = dof]
|
||||
(*theories [document = false]
|
||||
A
|
||||
|
@ -102,8 +107,19 @@ session example = HOL +
|
|||
The document template and ontology can be selected as follows:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>
|
||||
use_template "scrreprt-modern"
|
||||
theory
|
||||
C
|
||||
imports
|
||||
Isabelle_DOF.technical_report
|
||||
Isabelle_DOF.scholarly_paper
|
||||
begin
|
||||
|
||||
list_templates
|
||||
use_template "scrreprt-modern"
|
||||
list_ontologies
|
||||
use_ontology "technical_report" and "scholarly_paper"
|
||||
|
||||
end
|
||||
\<close>}
|
||||
|
||||
The commands @{boxed_theory_text
|
||||
|
@ -120,8 +136,37 @@ can be used for inspecting (and selecting) the available ontologies and template
|
|||
list_templates
|
||||
list_ontologies
|
||||
\<close>}
|
||||
|
||||
Note that you need to import the theories that define the ontologies that you
|
||||
want to use. Otherwise, they will not be available.
|
||||
\<close>
|
||||
|
||||
paragraph\<open>Warning.\<close>
|
||||
text\<open>
|
||||
Note that the session @{session "Isabelle_DOF"} needs to be part of the ``main'' session
|
||||
hierarchy. Loading the Isabelle/DOF theories as part of a session section, e.g.,
|
||||
\<close>
|
||||
text\<open>
|
||||
\begin{config}{ROOT}
|
||||
session example = HOL +
|
||||
options [document = pdf, document_output = "output", document_build = dof]
|
||||
session
|
||||
Isabelle_DOF.scholarly_paper
|
||||
Isabelle_DOF.technical_report
|
||||
theories
|
||||
C
|
||||
\end{config}
|
||||
\<close>
|
||||
text\<open>
|
||||
will not work. Trying to build a document using such a setup will result in the
|
||||
following error message:
|
||||
|
||||
@{boxed_bash [display]\<open>ë\prompt{}ë
|
||||
isabelle build -D .
|
||||
Running example ...
|
||||
Bad document_build engine "dof"
|
||||
example FAILED\<close>}
|
||||
\<close>
|
||||
|
||||
subsection*[naming::example]\<open>Name-Spaces, Long- and Short-Names\<close>
|
||||
text\<open>\<^isadof> is built upon the name space and lexical conventions of Isabelle. Long-names were
|
||||
|
@ -135,7 +180,11 @@ By lexical conventions, theory-names must be unique inside a session
|
|||
(and session names must be unique too), such that long-names are unique by construction.
|
||||
There are actually different name categories that form a proper name space, \<^eg>, the name space for
|
||||
constant symbols and type symbols are distinguished.
|
||||
|
||||
Additionally, \<^isadof> objects also come with a proper name space: classes (and monitors), instances,
|
||||
low-level class invariants (SML-defined invariants) all follow the lexical conventions of
|
||||
Isabelle. For instance, a class can be referenced outside of its theory using
|
||||
its short-name or its long-name if another class with the same name is defined
|
||||
in the current theory.
|
||||
Isabelle identifies names already with the shortest suffix that is unique in the global
|
||||
context and in the appropriate name category. This also holds for pretty-printing, which can
|
||||
at times be confusing since names stemming from the same specification construct may
|
||||
|
@ -152,19 +201,15 @@ over decades.
|
|||
|
||||
text\<open>At times, this causes idiosyncrasies like the ones cited in the following incomplete list:
|
||||
\<^item> text-antiquotations
|
||||
\<^theory_text>\<open>text\<close>\<^latex>\<open>\isasymopen\textbf{thm} "normally\_behaved\_def"\isasymclose\ \<close>
|
||||
versus \<^theory_text>\<open>text\<close>\<^latex>\<open>\isasymopen @\{\textbf{thm} "srac$_1$\_def"\}\isasymclose\ \<close>
|
||||
(while
|
||||
\<^theory_text>\<open>text\<close>\<^latex>\<open>\isasymopen @\{\textbf{thm} \isasymopen srac$_1$\_def \isasymclose\}\isasymclose\ \<close>
|
||||
fails)
|
||||
\<^theory_text>\<open>text\<open>@{thm \<doublequote>srac\<^sub>1_def\<doublequote>}\<close>\<close>
|
||||
while \<^theory_text>\<open>text\<open>@{thm \<open>srac\<^sub>1_def\<close>}\<close>\<close> fails
|
||||
\<^item> commands \<^theory_text>\<open>thm fundamental_theorem_of_calculus\<close> and
|
||||
\<^theory_text>\<open>thm\<close>\<^latex>\<open> "fundamental\_theorem\_of\_calculus"\<close>
|
||||
or \<^theory_text>\<open>lemma\<close> \<^latex>\<open>"H"\<close> and \<^theory_text>\<open>lemma \<open>H\<close>\<close> and \<^theory_text>\<open>lemma H\<close>
|
||||
\<^item> string expressions \<^theory_text>\<open>term\<close>\<^latex>\<open>\isasymopen ''abc'' @ ''cd''\isasymclose\ \<close> and equivalent
|
||||
\<^theory_text>\<open>term\<close>\<^latex>\<open>\isasymopen \isasymopen abc\isasymclose\ @ \isasymopen cd\isasymclose\isasymclose\<close>;
|
||||
but
|
||||
\<^theory_text>\<open>term\<close>\<^latex>\<open>\isasymopen\isasymopen A \isasymlongrightarrow\ B\isasymclose\isasymclose\ \<close>
|
||||
not equivalent to \<^theory_text>\<open>term\<close>\<^latex>\<open>\isasymopen''A \isasymlongrightarrow\ B''\isasymclose\ \<close>
|
||||
\<^theory_text>\<open>thm \<doublequote>fundamental_theorem_of_calculus\<doublequote>\<close>
|
||||
or \<^theory_text>\<open>lemma \<doublequote>H\<doublequote>\<close> and \<^theory_text>\<open>lemma \<open>H\<close>\<close> and \<^theory_text>\<open>lemma H\<close>
|
||||
\<^item> string expressions
|
||||
\<^theory_text>\<open>term\<open>\<quote>\<quote>abc\<quote>\<quote> @ \<quote>\<quote>cd\<quote>\<quote>\<close>\<close> and equivalent
|
||||
\<^theory_text>\<open>term \<open>\<open>abc\<close> @ \<open>cd\<close>\<close>\<close>;
|
||||
but \<^theory_text>\<open>term\<open>\<open>A \<longrightarrow> B\<close>\<close>\<close> not equivalent to \<^theory_text>\<open>term\<open>\<quote>\<quote>A \<longrightarrow> B\<quote>\<quote>\<close>\<close>
|
||||
which fails.
|
||||
\<close>
|
||||
|
||||
|
@ -420,6 +465,9 @@ defined by \<^typ>\<open>article\<close>.
|
|||
\<close>
|
||||
|
||||
|
||||
figure*[doc_termAq::figure,relative_width="80",src="''figures/doc-mod-term-aq.png''"]
|
||||
\<open>Term-Antiquotations Referencing to Annotated Elements\<close>
|
||||
|
||||
side_by_side_figure*[exploring::side_by_side_figure,anchor="''fig-Dogfood-II-bgnd1''",
|
||||
caption="''Exploring a reference of a text-element.''",relative_width="48",
|
||||
src="''figures/Dogfood-II-bgnd1''",anchor2="''fig-bgnd-text_section''",
|
||||
|
@ -479,17 +527,19 @@ underlying logical context, which turns the arguments into \<^emph>\<open>formal
|
|||
source, in contrast to the free-form antiquotations which basically influence the presentation.
|
||||
|
||||
We still mention a few of these document antiquotations here:
|
||||
\<^item> \<^theory_text>\<open>@{thm \<open>refl\<close>}\<close> or \<^theory_text>\<open>@{thm [display] \<open>refl\<close>}\<close> check that \<^theory_text>\<open>refl\<close> is indeed a reference
|
||||
\<^item> \<^theory_text>\<open>@{thm \<doublequote>refl\<doublequote>}\<close> or \<^theory_text>\<open>@{thm [display] \<doublequote>refl\<doublequote>}\<close>
|
||||
check that \<^theory_text>\<open>refl\<close> is indeed a reference
|
||||
to a theorem; the additional ``style" argument changes the presentation by printing the
|
||||
formula into the output instead of the reference itself,
|
||||
\<^item> \<^theory_text>\<open>@{lemma \<open>prop\<close> } by \<open>method\<close>\<close> allows deriving \<open>prop\<close> on the fly, thus guarantee
|
||||
that it is a corrollary of the current context,
|
||||
\<^item> \<^theory_text>\<open>@{lemma \<open>prop\<close> by method} \<close> allows deriving \<open>prop\<close> on the fly, thus guarantee
|
||||
that it is a corollary of the current context,
|
||||
\<^item> \<^theory_text>\<open>@{term \<open>term\<close> }\<close> parses and type-checks \<open>term\<close>,
|
||||
\<^item> \<^theory_text>\<open>@{value \<open>term\<close> }\<close> performs the evaluation of \<open>term\<close>,
|
||||
\<^item> \<^theory_text>\<open>@{ML \<open>ml-term\<close> }\<close> parses and type-checks \<open>ml-term\<close>,
|
||||
\<^item> \<^theory_text>\<open>@{ML_file \<open>ml-file\<close> }\<close> parses the path for \<open>ml-file\<close> and
|
||||
verifies its existence in the (Isabelle-virtual) file-system.
|
||||
\<close>
|
||||
|
||||
text\<open>There are options to display sub-parts of formulas etc., but it is a consequence
|
||||
of tight-checking that the information must be given complete and exactly in the syntax of
|
||||
Isabelle. This may be over-precise and a burden to readers not familiar with Isabelle, which may
|
||||
|
@ -497,14 +547,17 @@ motivate authors to choose the aforementioned freeform-style.
|
|||
|
||||
Additionally, documents antiquotations were added to check and evaluate terms with
|
||||
term antiquotations:
|
||||
\<^item> \<^theory_text>\<open>@{term_ \<open>term\<close> }\<close> parses and type-checks \<open>term\<close> with term antiquotations,
|
||||
for instance \<^theory_text>\<open>\<^term_> \<open>@{technical \<open>isadof\<close>}\<close>\<close> will parse and check
|
||||
that \<open>isadof\<close> is indeed an instance of the class \<^typ>\<open>technical\<close>,
|
||||
\<^item> \<^theory_text>\<open>@{value_ \<open>term\<close> }\<close> performs the evaluation of \<open>term\<close> with term antiquotations,
|
||||
for instance \<^theory_text>\<open>@{value_ \<open>mcc @{cenelec-term \<open>FT\<close>}\<close>}\<close>
|
||||
will print the value of the \<^const>\<open>mcc\<close> attribute of the instance \<open>FT\<close>.
|
||||
for instance \<^theory_text>\<open>@{value_ \<open>definition_list @{technical \<open>isadof\<close>}\<close>}\<close>
|
||||
will print the value of the \<^const>\<open>definition_list\<close> attribute of the instance \<open>isadof\<close>.
|
||||
\<^theory_text>\<open>value_\<close> may have an optional argument between square brackets to specify the evaluator but this
|
||||
argument must be specified after a default optional argument already defined
|
||||
by the text antiquotation implementation.
|
||||
So one must use the following syntax if one does not want to specify the first optional argument:
|
||||
\<open>@{value_ [] [nbe] \<open>r @{F \<open>xcv4\<close>}\<close>}\<close>. Note the empty brackets.
|
||||
\<^theory_text>\<open>@{value_ [] [nbe] \<open>definition_list @{technical \<open>isadof\<close>}\<close>}\<close>. Note the empty brackets.
|
||||
\<close>
|
||||
|
||||
(*<*)
|
|
@ -13,9 +13,9 @@
|
|||
|
||||
(*<*)
|
||||
theory
|
||||
"04_RefMan"
|
||||
"M_04_RefMan"
|
||||
imports
|
||||
"03_GuidedTour"
|
||||
"M_03_GuidedTour"
|
||||
begin
|
||||
|
||||
declare_reference*[infrastructure::technical]
|
||||
|
@ -310,13 +310,24 @@ text\<open>
|
|||
section\<open>Fundamental Commands of the \<^isadof> Core\<close>
|
||||
text\<open>Besides the core-commands to define an ontology as presented in the previous section,
|
||||
the \<^isadof> core provides a number of mechanisms to \<^emph>\<open>use\<close> the resulting data to annotate
|
||||
text-elements and, in some cases, terms.
|
||||
text-elements and, in some cases, terms.
|
||||
In the following, we formally introduce the syntax of the core commands as
|
||||
supported on the Isabelle/Isar level. Some more advanced functionality of the core
|
||||
is currently only available in the SML API's of the kernel.
|
||||
|
||||
\<close>
|
||||
|
||||
subsection\<open>Syntax\<close>
|
||||
text\<open>In the following, we formally introduce the syntax of the core commands as
|
||||
supported on the Isabelle/Isar level. Some more advanced functionality of the core
|
||||
is currently only available in the SML API's of the kernel.
|
||||
text\<open>
|
||||
|
||||
|
||||
\<close>
|
||||
text\<open>Recall that except \<^theory_text>\<open>text*[]\<open>...\<close>\<close>, all \<^isadof> commands were mapped to visible
|
||||
layout; these commands have to be wrapped into
|
||||
\<^verbatim>\<open>(*<*) ... (*>*)\<close> if this is undesired. \<close>
|
||||
|
||||
subsection\<open>Ontological Text-Contexts and their Management\<close>
|
||||
text\<open>
|
||||
|
||||
\<^item> \<open>obj_id\<close>:\<^index>\<open>obj\_id@\<open>obj_id\<close>\<close> (or \<^emph>\<open>oid\<close>\<^index>\<open>oid!oid@\<open>see obj_id\<close>\<close> for short) a \<^emph>\<open>name\<close>
|
||||
as specified in @{technical \<open>odl-manual0\<close>}.
|
||||
|
@ -329,15 +340,10 @@ is currently only available in the SML API's of the kernel.
|
|||
symbolic evaluation using the simplifier, \<open>nbe\<close> for \<^emph>\<open>normalization by
|
||||
evaluation\<close> and \<^emph>\<open>code\<close> for code generation in SML.
|
||||
\<^item> \<open>upd_meta_args\<close> :
|
||||
\<^rail>\<open> (obj_id ('::' class_id) ((',' attribute (':=' | '+=') HOL_term) * ))\<close>
|
||||
\<^rail>\<open> (obj_id ('::' class_id) ((',' attribute (':=' | '+=') HOL_term ) * ))\<close>
|
||||
|
||||
\<^item> \<open>annotated_text_element\<close> :
|
||||
\<^rail>\<open>
|
||||
( @@{command "text*"} '[' meta_args ']' '\<open>' formal_text '\<close>'
|
||||
| @@{command "ML*"} '[' meta_args ']' '\<open>' SML_code '\<close>'
|
||||
| @@{command "term*"} ('[' meta_args ']')? '\<open>' HOL_term '\<close>'
|
||||
| (@@{command "value*"}
|
||||
| @@{command "assert*"}) \<newline> ('[' meta_args ']')? ('[' evaluator ']')? '\<open>' HOL_term '\<close>'
|
||||
)
|
||||
\<^rail>\<open>( @@{command "text*"} '[' meta_args ']' '\<open>' formal_text '\<close>' )
|
||||
\<close>
|
||||
\<^rail>\<open>
|
||||
( @@{command "open_monitor*"}
|
||||
|
@ -350,21 +356,10 @@ is currently only available in the SML API's of the kernel.
|
|||
\<close>
|
||||
\<^item> \<^isadof> \<open>change_status_command\<close> :
|
||||
\<^rail>\<open> (@@{command "update_instance*"} '[' upd_meta_args ']')\<close>
|
||||
\<^item> \<^isadof> \<open>inspection_command\<close> :
|
||||
\<^rail>\<open> @@{command "print_doc_classes"}
|
||||
| @@{command "print_doc_items"}
|
||||
| @@{command "check_doc_global"}\<close>
|
||||
\<^item> \<^isadof> \<open>macro_command\<close> :
|
||||
\<^rail>\<open> @@{command "define_shortcut*"} name ('\<rightleftharpoons>' | '==') '\<open>' string '\<close>'
|
||||
| @@{command "define_macro*"} name ('\<rightleftharpoons>' | '==')
|
||||
\<newline> '\<open>' string '\<close>' '_' '\<open>' string '\<close>' \<close>
|
||||
\<close>
|
||||
text\<open>Recall that except \<^theory_text>\<open>text*[]\<open>...\<close>\<close>, all \<^isadof> commands were mapped to visible
|
||||
layout; these commands have to be wrapped into
|
||||
\<^verbatim>\<open>(*<*) ... (*>*)\<close> if this is undesired. \<close>
|
||||
|
||||
subsection\<open>Ontological Text-Contexts and their Management\<close>
|
||||
text\<open> With respect to the family of text elements,
|
||||
|
||||
|
||||
With respect to the family of text elements,
|
||||
\<^theory_text>\<open>text*[oid::cid, ...] \<open> \<dots> text \<dots> \<close> \<close> is the core-command of \<^isadof>: it permits to create
|
||||
an object of meta-data belonging to the class \<^theory_text>\<open>cid\<close>\<^index>\<open>cid!cid@\<open>see class_id\<close>\<close>.
|
||||
This is viewed as the \<^emph>\<open>definition\<close> of an instance of a document class.
|
||||
|
@ -391,11 +386,22 @@ For a declared class \<^theory_text>\<open>cid\<close>, there exists a text anti
|
|||
The precise presentation is decided in the \<^emph>\<open>layout definitions\<close>, for example by suitable
|
||||
\<^LaTeX>-template code. Declared but not yet defined instances must be referenced with a particular
|
||||
pragma in order to enforce a relaxed checking \<^theory_text>\<open> @{cid (unchecked) \<open>oid\<close>} \<close>.
|
||||
The choice of the default class in a @{command "declare_reference*"}-command
|
||||
can be influenced by setting globally an attribute:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>declare[[declare_reference_default_class = "definition"]]\<close>}
|
||||
Then in this example:
|
||||
@{boxed_theory_text [display]\<open>declare_reference*[def1]\<close>}
|
||||
\<open>def1\<close> will be a declared instance of the class \<open>definition\<close>.
|
||||
\<close>
|
||||
|
||||
subsection\<open>Ontological Code-Contexts and their Management\<close>
|
||||
|
||||
text\<open>The \<^theory_text>\<open>ML*[oid::cid, ...] \<open> \<dots> SML-code \<dots> \<close>\<close>-document elements proceed similarly:
|
||||
text\<open>
|
||||
\<^item> \<open>annotated_code_element\<close>:
|
||||
\<^rail>\<open>(@@{command "ML*"} '[' meta_args ']' '\<open>' SML_code '\<close>')\<close>
|
||||
|
||||
The \<^theory_text>\<open>ML*[oid::cid, ...] \<open> \<dots> SML-code \<dots> \<close>\<close>-document elements proceed similarly:
|
||||
a referentiable meta-object of class \<^theory_text>\<open>cid\<close> is created, initialized with the optional
|
||||
attributes and bound to \<^theory_text>\<open>oid\<close>. In fact, the entire the meta-argument list is optional.
|
||||
The SML-code is type-checked and executed
|
||||
|
@ -403,39 +409,139 @@ in the context of the SML toplevel of the Isabelle system as in the correspondin
|
|||
\<^theory_text>\<open>ML\<open> \<dots> SML-code \<dots> \<close>\<close>-command.
|
||||
Additionally, ML antiquotations were added to check and evaluate terms with
|
||||
term antiquotations:
|
||||
\<^item> \<^theory_text>\<open>@{term_ \<open>term\<close> }\<close> parses and type-checks \<open>term\<close> with term antiquotations,
|
||||
for instance \<^theory_text>\<open>@{term_ \<open>@{technical \<open>odl-manual1\<close>}\<close>}\<close> will parse and check
|
||||
that \<open>odl-manual1\<close> is indeed an instance of the class \<^typ>\<open>technical\<close>,
|
||||
\<^item> \<^theory_text>\<open>@{value_ \<open>term\<close> }\<close> performs the evaluation of \<open>term\<close> with term antiquotations,
|
||||
for instance \<^theory_text>\<open>@{value_ \<open>mcc @{cenelec-term \<open>FT\<close>}\<close>}\<close>
|
||||
will print the value of the \<^const>\<open>mcc\<close> attribute of the instance \<open>FT\<close>.
|
||||
for instance \<^theory_text>\<open>@{value_ \<open>definition_list @{technical \<open>odl-manual1\<close>}\<close>}\<close>
|
||||
will get the value of the \<^const>\<open>definition_list\<close> attribute of the instance \<open>odl-manual1\<close>.
|
||||
\<^theory_text>\<open>value_\<close> may have an optional argument between square brackets to specify the evaluator:
|
||||
\<open>@{value_ [nbe] \<open>mcc @{cenelec-term \<open>FT\<close>}\<close>}\<close> forces \<open>value_\<close> to evaluate
|
||||
\<^theory_text>\<open>@{value_ [nbe] \<open>definition_list @{technical \<open>odl-manual1\<close>}\<close>}\<close> forces \<open>value_\<close> to evaluate
|
||||
the term using normalization by evaluation (see @{cite "wenzel:isabelle-isar:2020"}).
|
||||
\<close>
|
||||
|
||||
(*<*)
|
||||
doc_class A =
|
||||
level :: "int option"
|
||||
x :: int
|
||||
definition*[a1::A, x=5, level="Some 1"] xx' where "xx' \<equiv> A.x @{A \<open>a1\<close>}" if "A.x @{A \<open>a1\<close>} = 5"
|
||||
|
||||
doc_class E =
|
||||
x :: "string" <= "''qed''"
|
||||
|
||||
doc_class cc_assumption_test =
|
||||
a :: int
|
||||
text*[cc_assumption_test_ref::cc_assumption_test]\<open>\<close>
|
||||
|
||||
definition tag_l :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" where "tag_l \<equiv> \<lambda>x y. y"
|
||||
|
||||
lemma* tagged : "tag_l @{cc-assumption-test \<open>cc_assumption_test_ref\<close>} AA \<Longrightarrow> AA"
|
||||
by (simp add: tag_l_def)
|
||||
|
||||
find_theorems name:tagged "(_::cc_assumption_test \<Rightarrow> _ \<Rightarrow> _) _ _ \<Longrightarrow>_"
|
||||
|
||||
lemma*[e5::E] testtest : "xx + A.x @{A \<open>a1\<close>} = yy + A.x @{A \<open>a1\<close>} \<Longrightarrow> xx = yy" by simp
|
||||
|
||||
doc_class B'_test' =
|
||||
b::int
|
||||
|
||||
text*[b::B'_test']\<open>\<close>
|
||||
|
||||
term*\<open>@{B--test- \<open>b\<close>}\<close>
|
||||
|
||||
declare_reference*["text-elements-expls"::technical]
|
||||
(*>*)
|
||||
ML\<open>
|
||||
|
||||
\<close>
|
||||
subsection*["subsec:onto-term-ctxt"::technical]\<open>Ontological Term-Contexts and their Management\<close>
|
||||
text\<open>The major commands providing term-contexts are
|
||||
text\<open>
|
||||
\<^item> \<open>annotated_term_element\<close>
|
||||
\<^rail>\<open>
|
||||
(@@{command "term*"} ('[' meta_args ']')? '\<open>' HOL_term '\<close>'
|
||||
| (@@{command "value*"}
|
||||
| @@{command "assert*"}) \<newline> ('[' meta_args ']')? ('[' evaluator ']')? '\<open>' HOL_term '\<close>'
|
||||
| (@@{command "definition*"}) ('[' meta_args ']')?
|
||||
('... see ref manual')
|
||||
| (@@{command "lemma*"} | @@{command "theorem*"} | @@{command "corollary*"}
|
||||
| @@{command "proposition*"} | @@{command "schematic_goal*"}) ('[' meta_args ']')?
|
||||
('... see ref manual')
|
||||
)
|
||||
\<close>
|
||||
|
||||
For a declared class \<^theory_text>\<open>cid\<close>, there exists a term-antiquotation of the form \<^theory_text>\<open>@{cid \<open>oid\<close>}\<close>.
|
||||
Due to implementation of term-antiquotation using mixfix annotation
|
||||
(see @{cite "wenzel:isabelle-isar:2020"}),
|
||||
should a class \<open>cid\<close> contains an underscore or a single quote,
|
||||
they will be converted to hyphens in the term-antiquotation.
|
||||
For example:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>doc_class B'_test' =
|
||||
b::int
|
||||
|
||||
text*[b::B'_test']\<open>\<close>
|
||||
|
||||
term*\<open>@{B--test- \<open>b\<close>}\<close>\<close>}
|
||||
|
||||
The major commands providing term-contexts are
|
||||
\<^theory_text>\<open>term*[oid::cid, ...] \<open> \<dots> HOL-term \<dots> \<close>\<close>,
|
||||
\<^theory_text>\<open>value*[oid::cid, ...] \<open> \<dots> HOL-term \<dots> \<close>\<close> and
|
||||
\<^theory_text>\<open>assert*[oid::cid, ...] \<open> \<dots> HOL-term \<dots> \<close>\<close>\<^footnote>\<open>The meta-argument list is optional.\<close>.
|
||||
Wrt. creation, track-ability and checking they are analogous to the ontological text and
|
||||
Wrt. creation, track-ability and checking these commands are analogous to the ontological text and
|
||||
code-commands. However the argument terms may contain term-antiquotations stemming from an
|
||||
ontology definition. Both term-contexts were type-checked and \<^emph>\<open>validated\<close> against
|
||||
the global context (so: in the term @{term_ \<open>@{author \<open>bu\<close>}\<close>}, \<open>bu\<close>
|
||||
ontology definition. Term-contexts were type-checked and \<^emph>\<open>validated\<close> against
|
||||
the global context (so: in the term @{term_ [source] \<open>@{author \<open>bu\<close>}\<close>}, \<open>bu\<close>
|
||||
is indeed a string which refers to a meta-object belonging
|
||||
to the document class \<^typ>\<open>author\<close>, for example).
|
||||
The term-context in the \<open>value*\<close>-command and \<^emph>\<open>assert*\<close>-command is additionally expanded
|
||||
Putting aside @{command "term*"}-command,
|
||||
the term-context in the other commands is additionally expanded
|
||||
(\<^eg> replaced) by a term denoting the meta-object.
|
||||
This expansion happens \<^emph>\<open>before\<close> evaluation of the term, thus permitting
|
||||
executable HOL-functions to interact with meta-objects.
|
||||
The \<^emph>\<open>assert*\<close>-command allows for logical statements to be checked in the global context
|
||||
The @{command "assert*"}-command allows for logical statements to be checked in the global context
|
||||
(see @{technical (unchecked) \<open>text-elements-expls\<close>}).
|
||||
% TODO:
|
||||
% Section reference @{docitem (unchecked) \<open>text-elements-expls\<close>} has not the right number
|
||||
This is particularly useful to explore formal definitions wrt. their border cases.
|
||||
|
||||
The @{command "definition*"}-command allows \<open>prop\<close>, \<open>spec_prems\<close>, and \<open>for_fixes\<close>
|
||||
(see the @{command "definition"} command in @{cite "wenzel:isabelle-isar:2020"}) to contain
|
||||
term-antiquotations. For example:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>doc_class A =
|
||||
level :: "int option"
|
||||
x :: int
|
||||
definition*[a1::A, x=5, level="Some 1"] xx' where "xx' \<equiv> A.x @{A \<open>a1\<close>}" if "A.x @{A \<open>a1\<close>} = 5"\<close>}
|
||||
|
||||
The @{term_ [source] \<open>@{A \<open>a1\<close>}\<close>} term-antiquotation is used both in \<open>prop\<close> and in \<open>spec_prems\<close>.
|
||||
|
||||
@{command "lemma*"}, @{command "theorem*"}, etc., are extended versions of the goal commands
|
||||
defined in @{cite "wenzel:isabelle-isar:2020"}. Term-antiquotations can be used either in
|
||||
a \<open>long_statement\<close> or in a \<open>short_statement\<close>.
|
||||
For instance:
|
||||
@{boxed_theory_text [display]
|
||||
\<open>lemma*[e5::E] testtest : "xx + A.x @{A \<open>a1\<close>} = yy + A.x @{A \<open>a1\<close>} \<Longrightarrow> xx = yy"
|
||||
by simp\<close>}
|
||||
|
||||
This @{command "lemma*"}-command is defined using the @{term_ [source] \<open>@{A \<open>a1\<close>}\<close>}
|
||||
term-antiquotation and attaches the @{docitem_name "e5"} instance meta-data to the \<open>testtest\<close>-lemma.
|
||||
|
||||
@{boxed_theory_text [display]
|
||||
\<open>doc_class cc_assumption_test =
|
||||
a :: int
|
||||
text*[cc_assumption_test_ref::cc_assumption_test]\<open>\<close>
|
||||
|
||||
definition tag_l :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" where "tag_l \<equiv> \<lambda>x y. y"
|
||||
|
||||
lemma* tagged : "tag_l @{cc-assumption-test \<open>cc_assumption_test_ref\<close>} AA \<Longrightarrow> AA"
|
||||
by (simp add: tag_l_def)
|
||||
|
||||
find_theorems name:tagged "(_::cc_assumption_test \<Rightarrow> _ \<Rightarrow> _) _ _ \<Longrightarrow>_"\<close>}
|
||||
|
||||
In this example, the definition \<^const>\<open>tag_l\<close> adds a tag to the \<open>tagged\<close> lemma using
|
||||
the term-antiquotation @{term_ [source] \<open>@{cc-assumption-test \<open>cc_assumption_test_ref\<close>}\<close>}
|
||||
inside the \<open>prop\<close> declaration.
|
||||
|
||||
Note unspecified attribute values were represented by free fresh variables which constrains \<^dof>
|
||||
to choose either the normalization-by-evaluation strategy \<^theory_text>\<open>nbe\<close> or a proof attempt via
|
||||
the \<^theory_text>\<open>auto\<close> method. A failure of these strategies will be reported and regarded as non-validation
|
||||
|
@ -447,7 +553,13 @@ declare_reference*["sec:advanced"::technical]
|
|||
(*>*)
|
||||
|
||||
subsection\<open>Status and Query Commands\<close>
|
||||
text\<open>\<^isadof> provides a number of inspection commands.
|
||||
|
||||
text\<open>
|
||||
\<^item> \<^isadof> \<open>inspection_command\<close> :
|
||||
\<^rail>\<open> @@{command "print_doc_classes"}
|
||||
| @@{command "print_doc_items"}
|
||||
| @@{command "check_doc_global"}\<close>
|
||||
\<^isadof> provides a number of inspection commands.
|
||||
\<^item> \<^theory_text>\<open>print_doc_classes\<close> allows to view the status of the internal
|
||||
class-table resulting from ODL definitions,
|
||||
\<^item> \<^ML>\<open>DOF_core.print_doc_class_tree\<close> allows for presenting (fragments) of
|
||||
|
@ -469,7 +581,13 @@ text\<open>\<^isadof> provides a number of inspection commands.
|
|||
\<close>
|
||||
|
||||
subsection\<open>Macros\<close>
|
||||
text\<open>There is a mechanism to define document-local macros which
|
||||
text\<open>
|
||||
\<^item> \<^isadof> \<open>macro_command\<close> :
|
||||
\<^rail>\<open> @@{command "define_shortcut*"} name ('\<rightleftharpoons>' | '==') '\<open>' string '\<close>'
|
||||
| @@{command "define_macro*"} name ('\<rightleftharpoons>' | '==')
|
||||
\<newline> '\<open>' string '\<close>' '_' '\<open>' string '\<close>' \<close>
|
||||
|
||||
There is a mechanism to define document-local macros which
|
||||
were PIDE-supported but lead to an expansion in the integrated source; this feature
|
||||
can be used to define
|
||||
\<^item> \<^theory_text>\<open>shortcuts\<close>, \<^ie>, short names that were expanded to, for example,
|
||||
|
@ -1016,6 +1134,7 @@ text\<open>
|
|||
closing of a monitor.
|
||||
For this use-case you can use low-level class invariants
|
||||
(see @{technical (unchecked) "sec:low_level_inv"}).
|
||||
Also, for now, term-antiquotations can not be used in an invariant formula.
|
||||
\<close>
|
||||
|
||||
subsection*["sec:monitors"::technical]\<open>ODL Monitors\<close>
|
||||
|
@ -1108,27 +1227,28 @@ text\<open>
|
|||
(note the long name of the class),
|
||||
is straight-forward:
|
||||
|
||||
\begin{sml}
|
||||
fun check_result_inv oid {is_monitor:bool} ctxt =
|
||||
@{boxed_sml [display]
|
||||
\<open>fun check_result_inv oid {is_monitor:bool} ctxt =
|
||||
let
|
||||
val kind =
|
||||
ISA_core.compute_attr_access ctxt "evidence" oid NONE <@>{here}
|
||||
ISA_core.compute_attr_access ctxt "evidence" oid NONE @{here}
|
||||
val prop =
|
||||
ISA_core.compute_attr_access ctxt "property" oid NONE <@>{here}
|
||||
ISA_core.compute_attr_access ctxt "property" oid NONE @{here}
|
||||
val tS = HOLogic.dest_list prop
|
||||
in case kind of
|
||||
<@>{term "proof"} => if not(null tS) then true
|
||||
@{term "proof"} => if not(null tS) then true
|
||||
else error("class result invariant violation")
|
||||
| _ => false
|
||||
| _ => true
|
||||
end
|
||||
val _ = Theory.setup (DOF_core.update_class_invariant
|
||||
"Low_Level_Syntax_Invariants.result" check_result_inv)
|
||||
\end{sml}
|
||||
val cid_long = DOF_core.get_onto_class_name_global "result" @{theory}
|
||||
val bind = Binding.name "Check_Result"
|
||||
val _ = Theory.setup (DOF_core.make_ml_invariant (check_result_inv, cid_long)
|
||||
|> DOF_core.add_ml_invariant bind)\<close>}
|
||||
|
||||
The \<^ML>\<open>Theory.setup\<close>-command (last line) registers the \<^boxed_theory_text>\<open>check_result_inv\<close> function
|
||||
into the \<^isadof> kernel, which activates any creation or modification of an instance of
|
||||
\<^boxed_theory_text>\<open>result\<close>. We cannot replace \<^boxed_theory_text>\<open>compute_attr_access\<close> by the
|
||||
corresponding antiquotation \<^boxed_theory_text>\<open>@{docitem_value kind::oid}\<close>, since
|
||||
corresponding antiquotation \<^boxed_theory_text>\<open>\<^value_>\<open>evidence @{result \<open>oid\<close>}\<close>\<close>, since
|
||||
\<^boxed_theory_text>\<open>oid\<close> is bound to a variable here and can therefore not be statically expanded.
|
||||
\<close>
|
||||
|
|
@ -12,8 +12,8 @@
|
|||
*************************************************************************)
|
||||
|
||||
(*<*)
|
||||
theory "05_Implementation"
|
||||
imports "04_RefMan"
|
||||
theory "M_05_Implementation"
|
||||
imports "M_04_RefMan"
|
||||
begin
|
||||
(*>*)
|
||||
|
||||
|
@ -108,15 +108,21 @@ val attributes =(Parse.$$$ "[" |-- (reference
|
|||
|--(Parse.enum ","attribute)))[]))--| Parse.$$$ "]"
|
||||
\end{sml}
|
||||
|
||||
The ``model'' \<^boxed_sml>\<open>declare_reference_opn\<close> and ``new'' \<^boxed_sml>\<open>attributes\<close> parts were
|
||||
The ``model'' \<^boxed_sml>\<open>create_and_check_docitem\<close> and ``new''
|
||||
\<^boxed_sml>\<open>ODL_Meta_Args_Parser.attributes\<close> parts were
|
||||
combined via the piping operator and registered in the Isar toplevel:
|
||||
|
||||
@{boxed_sml [display]
|
||||
\<open>fun declare_reference_opn (((oid,_),_),_) =
|
||||
(Toplevel.theory (DOF_core.declare_object_global oid))
|
||||
val _ = Outer_Syntax.command <@>{command_keyword "declare_reference"}
|
||||
"declare document reference"
|
||||
(attributes >> declare_reference_opn);\<close>}
|
||||
\<open>val _ =
|
||||
let fun create_and_check_docitem (((oid, pos),cid_pos),doc_attrs)
|
||||
= (Value_Command.Docitem_Parser.create_and_check_docitem
|
||||
{is_monitor = false} {is_inline=true}
|
||||
{define = false} oid pos (cid_pos) (doc_attrs))
|
||||
in Outer_Syntax.command @{command_keyword "declare_reference*"}
|
||||
"declare document reference"
|
||||
(ODL_Meta_Args_Parser.attributes
|
||||
>> (Toplevel.theory o create_and_check_docitem))
|
||||
end;\<close>}
|
||||
|
||||
Altogether, this gives the extension of Isabelle/HOL with Isar syntax and semantics for the
|
||||
new \emph{command}:
|
||||
|
@ -126,7 +132,7 @@ declare_reference* [lal::requirement, alpha="main", beta=42]
|
|||
\<close>}
|
||||
|
||||
The construction also generates implicitly some markup information; for example, when hovering
|
||||
over the \<^boxed_theory_text>\<open>declare_reference\<close> command in the IDE, a popup window with the text:
|
||||
over the \<^boxed_theory_text>\<open>declare_reference*\<close> command in the IDE, a popup window with the text:
|
||||
``declare document reference'' will appear.
|
||||
\<close>
|
||||
|
||||
|
@ -137,12 +143,10 @@ text\<open>
|
|||
can be added to the system that works on the internal plugin-data freely. For example, in
|
||||
|
||||
@{boxed_sml [display]
|
||||
\<open>val _ = Theory.setup(
|
||||
Thy_Output.antiquotation <@>{binding docitem}
|
||||
docitem_antiq_parser
|
||||
(docitem_antiq_gen default_cid) #>
|
||||
ML_Antiquotation.inline <@>{binding docitem_value}
|
||||
ML_antiq_docitem_value)\<close>}
|
||||
\<open>val _ = Theory.setup
|
||||
(docitem_antiquotation @{binding "docitem"} DOF_core.default_cid #>
|
||||
|
||||
ML_Antiquotation.inline @{binding "docitem_value"} ML_antiquotation_docitem_value)\<close>}
|
||||
the text antiquotation \<^boxed_sml>\<open>docitem\<close> is declared and bounded to a parser for the argument
|
||||
syntax and the overall semantics. This code defines a generic antiquotation to be used in text
|
||||
elements such as
|
12
README.md
|
@ -35,7 +35,7 @@ Isabelle/DOF is currently consisting out of two AFP entries:
|
|||
|
||||
* [Isabelle_DOF:](https://www.isa-afp.org/entries/Isabelle_DOF.html) This entry
|
||||
contains the Isabelle/DOF system itself, including the Isabelle/DOF manual.
|
||||
* [Isabelle_DOF-Example-Scholarly_Paper:](https://www.isa-afp.org/entries/Isabelle_DOF-Example-Scholarly_Paper.html)
|
||||
* [Isabelle_DOF-Example-I:](https://www.isa-afp.org/entries/Isabelle_DOF-Example-I.html)
|
||||
This entry contains an example of an academic paper written using the
|
||||
Isabelle/DOF system.
|
||||
|
||||
|
@ -72,16 +72,16 @@ foo@bar:~$ isabelle build -d . -o 'timeout_scale=2' Isabelle_DOF-Proofs
|
|||
## Usage
|
||||
|
||||
In the following, we assume that you installed Isabelle/DOF either from the AFP
|
||||
(adding the AFP as a component to your Isabelle system) or from the GIT
|
||||
(adding the AFP as a component to your Isabelle system) or from the Git
|
||||
repository of Isabelle/DOF (installing Isabelle/DOF as a component to your
|
||||
Isabelle system).
|
||||
|
||||
Assuming that your current directory contains the example academic paper in the
|
||||
subdirectory ``Isabelle_DOF-Example-Scholarly_Paper/``, you can open it similar
|
||||
subdirectory ``Isabelle_DOF-Example-I/``, you can open it similar
|
||||
to any standard Isabelle theory:
|
||||
|
||||
```console
|
||||
isabelle jedit -l Isabelle_DOF Isabelle_DOF-Example-Scholarly_Paper/IsaDofApplications.thy
|
||||
isabelle jedit -l Isabelle_DOF Isabelle_DOF-Example-I/IsaDofApplications.thy
|
||||
```
|
||||
|
||||
This will open an example of a scientific paper using the pre-compiled session
|
||||
|
@ -90,7 +90,7 @@ defined in the ``Isabelle_DOF`` session. If you want to edit the ontology defin
|
|||
just open the theory file with the session ``Functional-Automata``:
|
||||
|
||||
```console
|
||||
isabelle jedit -l Functional-Automata Isabelle_DOF-Example-Scholarly_Paper/IsaDofApplications.thy
|
||||
isabelle jedit -l Functional-Automata Isabelle_DOF-Example-I/IsaDofApplications.thy
|
||||
```
|
||||
|
||||
While this gives you more flexibility, it might "clutter" your editing
|
||||
|
@ -109,7 +109,7 @@ in a subdirectory:
|
|||
* [Isabelle_DOF](./Isabelle_DOF/): This is the main session, providing the
|
||||
Isabelle/DOF system. Furthermore, this session is currently under
|
||||
consideration for a submission to the AFP.
|
||||
* [Isabelle_DOF-Example-Scholarly_Paper](./Isabelle_DOF-Example-Scholarly_Paper/):
|
||||
* [Isabelle_DOF-Example-I](./Isabelle_DOF-Example-I/):
|
||||
This session provides an example document written Isabelle/DOF. It only
|
||||
requires the core ontologies provided by the ``Isabelle_DOF`` session.
|
||||
Furthermore, this session is currently under consideration for a submission to
|
||||
|
|
|
@ -28,10 +28,10 @@ complete AFP. For this, please follow the instructions given at
|
|||
<!--
|
||||
As Isabelle session names need to be
|
||||
unique, you will need to disable the entries ``Isabelle_DOF`` and
|
||||
``Isabelle_DOF-Example-Scholarly_Paper`` provided as part of the AFP. For this,
|
||||
``Isabelle_DOF-Example-I`` provided as part of the AFP. For this,
|
||||
you will need to edit the file ``$AFP/thys/ROOTS`` (where ``$AFP`` refers to the
|
||||
directory in which you installed the AFP) and delete the two entries
|
||||
``Isabelle_DOF`` and ``Isabelle_DOF-Example-Scholarly_Paper``.
|
||||
``Isabelle_DOF`` and ``Isabelle_DOF-Example-I``.
|
||||
-->
|
||||
|
||||
For the development version of Isabelle, installing the complete AFP
|
||||
|
|