Compare commits

...

17 Commits

Author SHA1 Message Date
Achim D. Brucker 26774fc053 Merge branch 'afp_resubmission' of git.logicalhacking.com:Isabelle_DOF/Isabelle_DOF into afp_resubmission 2024-04-18 09:11:26 +01:00
Burkhart Wolff 7d6048bf64 Merge branch 'afp_resubmission' of https://git.logicalhacking.com/Isabelle_DOF/Isabelle_DOF into afp_resubmission 2024-04-16 11:11:07 +02:00
Burkhart Wolff 231892cd23 ... 2024-04-16 11:11:04 +02:00
Nicolas Méric c945da75fa Proof reading 2024-04-15 14:28:31 +02:00
Nicolas Méric b554f20a5c Move onto_morphism command to isabelle dof core 2024-04-15 12:57:42 +02:00
Burkhart Wolff 734c1953bd polishing 2024-04-12 18:52:57 +02:00
Burkhart Wolff a735e9a1f2 roughly complete section 5 on proofs over ontologies. 2024-04-12 18:42:04 +02:00
Burkhart Wolff 7c2a6099f8 section on previewer 2024-04-11 21:08:36 +02:00
Burkhart Wolff 6dfefc6b4e some elements in the proof chapter 2024-04-09 09:27:14 +02:00
Burkhart Wolff 3235410af3 some elements of chapter Proof_Ontologies. 2024-04-07 22:07:40 +02:00
Burkhart Wolff 4745c58803 some elements of chapter Proof_Ontologies. 2024-04-07 22:04:09 +02:00
Burkhart Wolff 28d1fa926e Merge branch 'main' into afp_resubmission 2024-04-07 17:19:24 +02:00
Nicolas Méric 93ef94cddb Move some theorems in regexpinterface 2024-04-05 14:56:55 +02:00
Nicolas Méric 20e90f688f Merge branch 'main' into afp_resubmission 2024-04-05 14:30:03 +02:00
Nicolas Méric 51d93e38f8 Add constants for first monitor accepts clause 2024-04-04 12:08:35 +02:00
Nicolas Méric 7791538b54 Update to isabelle 2023 and add morphism examples 2024-04-04 12:08:25 +02:00
Achim D. Brucker f61e107515 Updated scala parts to Isabelle 2023. 2024-04-02 12:36:44 +01:00
13 changed files with 920 additions and 133 deletions

View File

@@ -37,13 +37,14 @@ fun gen_enriched_document_command2 name {body} cid_transform attr_transform mark
xstring_opt:(xstring * Position.T) option),
toks_list:Input.source list)
: theory -> theory =
let val ((binding,cid_pos), doc_attrs) = meta_args
let val toplvl = Toplevel.make_state o SOME
val ((binding,cid_pos), doc_attrs) = meta_args
val oid = Binding.name_of binding
val oid' = if meta_args = ODL_Meta_Args_Parser.empty_meta_args
then "output"
else oid
(* as side-effect, generates markup *)
fun check_n_tex_text thy toks = let val ctxt = Toplevel.presentation_context (Toplevel.make_state (SOME thy))
fun check_n_tex_text thy toks = let val ctxt = Toplevel.presentation_context (toplvl thy)
val pos = Input.pos_of toks;
val _ = Context_Position.reports ctxt
[(pos, Markup.language_document (Input.is_delimited toks)),

View File

@@ -22,8 +22,10 @@ session "Isabelle_DOF" (AFP) = "Functional-Automata" +
"thys/manual/M_01_Introduction"
"thys/manual/M_02_Background"
"thys/manual/M_03_GuidedTour"
"thys/manual/M_04_RefMan"
"thys/manual/M_05_Implementation"
"thys/manual/M_04_Document_Ontology"
"thys/manual/M_05_Proofs_Ontologies"
"thys/manual/M_06_RefMan"
"thys/manual/M_07_Implementation"
"thys/manual/Isabelle_DOF_Manual"
document_files
"root.bib"
@@ -51,6 +53,7 @@ session "Isabelle_DOF" (AFP) = "Functional-Automata" +
"figures/doc-mod-onto-docinst.pdf"
"figures/doc-mod-DOF.pdf"
"figures/doc-mod-term-aq.pdf"
"figures/ThisPaperWithPreviewer.png"
export_classpath

View File

@@ -2,6 +2,8 @@
\input{M_01_Introduction.tex}
\input{M_02_Background.tex}
\input{M_03_GuidedTour.tex}
\input{M_04_RefMan.tex}
\input{M_05_Implementation.tex}
\input{M_04_Document_Ontology.tex}
\input{M_05_Proofs_Ontologies.tex}
\input{M_06_RefMan.tex}
\input{M_07_Implementation.tex}
\input{Isabelle_DOF_Manual.tex}

Binary file not shown. (New image file; size: 541 KiB)

View File

@@ -10,6 +10,39 @@
year = 2015
}
@Book{ books/daglib/0032976,
added-at = {2014-03-12T00:00:00.000+0100},
author = {Euzenat, J{\'e}r{\^o}me and Shvaiko, Pavel},
biburl = {https://www.bibsonomy.org/bibtex/28d5372a81f181d9d5a761ca12209cf39/dblp},
interhash = {fc55a5b84d114e38db0a0303cc1bd7da},
intrahash = {8d5372a81f181d9d5a761ca12209cf39},
isbn = {978-3-642-38720-3},
keywords = {dblp},
pages = {I-XVII, 1--511},
publisher = {Springer},
timestamp = {2015-06-18T09:49:52.000+0200},
title = {Ontology Matching, Second Edition.},
year = 2013,
doi = {10.1007/978-3-642-38721-0}
}
@Misc{ atl,
title = {{ATL} -- A model transformation technology},
url = {https://www.eclipse.org/atl/},
author = {{Eclipse Foundation}},
}
@InProceedings{ BGPP95,
author = {Yamine A{\"{\i}}t Ameur and Frederic Besnard and Patrick Girard and Guy Pierra and Jean{-}Claude
Potier},
title = {Formal Specification and Metaprogramming in the {EXPRESS} Language},
booktitle = {The 7th International Conference on Software Engineering and Knowledge Engineering (SEKE)},
pages = {181--188},
publisher = {Knowledge Systems Institute},
year = 1995
}
@Misc{ ibm:doors:2019,
author = {IBM},
title = {{IBM} Engineering Requirements Management {DOORS} Family},

View File

@@ -82,50 +82,8 @@ doc_class report =
\<lbrakk>index\<rbrakk> ~~ \<lbrace>annex\<rbrace>\<^sup>* )"
section\<open>Experimental\<close>
ML\<open> fun get_class_2_ML ctxt (str,_) =
let val thy = Context.theory_of ctxt
val DOF_core.Onto_Class S = (DOF_core.get_onto_class_global' str thy)
in ML_Syntax.atomic(ML_Syntax.print_string(@{make_string} S)) end \<close>
setup\<open>ML_Antiquotation.inline \<^binding>\<open>doc_class2\<close>
(fn (ctxt,toks) => (AttributeAccess.parse_cid >> get_class_2_ML ctxt) (ctxt, toks))\<close>
ML\<open>@{term \<open>a + b\<close>}\<close>
ML\<open>@{doc_class2 "report"}\<close>
ML\<open>
fun constant_def (decl, spec, prems, params) = #2 o Specification.definition decl params prems spec
fun meta_eq_const T = Const (\<^const_name>\<open>Pure.eq\<close>, T --> T --> propT);
fun mk_meta_eq (t, u) = meta_eq_const (fastype_of t) $ t $ u;
val rexp_ty = @{typ "doc_class rexp"}
fun define (binding, rhs) (lthy)=
let val Const(cname, _) = Syntax.read_term lthy (Binding.name_of binding)
val lhs = Const(cname, rexp_ty)
val bdg = Binding.suffix_name "_monitor" binding
val eq = mk_meta_eq(Free(Binding.name_of bdg, rexp_ty),rhs)
val args = (SOME(bdg,NONE,NoSyn), (Binding.empty_atts,eq),[],[]) ;
in constant_def args lthy end;
\<close>
ML\<open> val DOF_core.Onto_Class S = (DOF_core.get_onto_class_global' "report" @{theory}) \<close>
setup\<open>Named_Target.theory_map(define (#name S, hd (#rex S))) \<close>
ML\<open> val DOF_core.Onto_Class S = (DOF_core.get_onto_class_global' "article" @{theory}) \<close>
setup\<open>Named_Target.theory_map(define (#name S, hd (#rex S))) \<close>
(* switch on regexp syntax *)
notation Star ("\<lbrace>(_)\<rbrace>\<^sup>*" [0]100)
notation Plus (infixr "||" 55)
@@ -133,72 +91,6 @@ notation Times (infixr "~~" 60)
notation Atom ("\<lfloor>_\<rfloor>" 65)
lemma regexp_sub : "a \<le> b \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (\<lfloor>a\<rfloor>) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (\<lfloor>b\<rfloor>)"
using dual_order.trans by auto
lemma regexp_seq_mono:
"Lang(a) \<subseteq> Lang (a') \<Longrightarrow> Lang(b) \<subseteq> Lang (b') \<Longrightarrow> Lang(a ~~ b) \<subseteq> Lang(a' ~~ b')" by auto
lemma regexp_seq_mono':
"L\<^sub>s\<^sub>u\<^sub>b(a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (a') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(a ~~ b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b(a' ~~ b')" by auto
lemma regexp_alt_mono :"Lang(a) \<subseteq> Lang (a') \<Longrightarrow> Lang(a || b) \<subseteq> Lang(a' || b)" by auto
lemma regexp_alt_mono' :"L\<^sub>s\<^sub>u\<^sub>b(a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (a') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(a || b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b(a' || b)" by auto
lemma regexp_alt_commute : "Lang(a || b) = Lang(b || a)" by auto
lemma regexp_alt_commute' : "L\<^sub>s\<^sub>u\<^sub>b(a || b) = L\<^sub>s\<^sub>u\<^sub>b(b || a)" by auto
lemma regexp_unit_right : "Lang (a) = Lang (a ~~ One) " by simp
lemma regexp_unit_right' : "L\<^sub>s\<^sub>u\<^sub>b (a) = L\<^sub>s\<^sub>u\<^sub>b (a ~~ One) " by simp
lemma regexp_unit_left : "Lang (a) = Lang (One ~~ a) " by simp
lemma regexp_unit_left' : "L\<^sub>s\<^sub>u\<^sub>b (a) = L\<^sub>s\<^sub>u\<^sub>b (One ~~ a) " by simp
lemma opt_star_incl :"Lang (opt a) \<subseteq> Lang (Star a)" by (simp add: opt_def subset_iff)
lemma opt_star_incl':"L\<^sub>s\<^sub>u\<^sub>b (opt a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star a)" by (simp add: opt_def subset_iff)
lemma rep1_star_incl:"Lang (rep1 a) \<subseteq> Lang (Star a)"
unfolding rep1_def by(subst L_Star, subst L_Conc)(force)
lemma rep1_star_incl':"L\<^sub>s\<^sub>u\<^sub>b (rep1 a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star a)"
unfolding rep1_def by(subst L\<^sub>s\<^sub>u\<^sub>b_Star, subst L\<^sub>s\<^sub>u\<^sub>b_Conc)(force)
lemma cancel_rep1 : "Lang (a) \<subseteq> Lang (rep1 a)"
unfolding rep1_def by auto
lemma cancel_rep1' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (rep1 a)"
unfolding rep1_def by auto
lemma seq_cancel_opt : "Lang (a) \<subseteq> Lang (c) \<Longrightarrow> Lang (a) \<subseteq> Lang (opt b ~~ c)"
by(subst regexp_unit_left, rule regexp_seq_mono)(simp_all add: opt_def)
lemma seq_cancel_opt' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (c) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (opt b ~~ c)"
by(subst regexp_unit_left', rule regexp_seq_mono')(simp_all add: opt_def)
lemma seq_cancel_Star : "Lang (a) \<subseteq> Lang (c) \<Longrightarrow> Lang (a) \<subseteq> Lang (Star b ~~ c)"
by auto
lemma seq_cancel_Star' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (c) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b ~~ c)"
by auto
lemma mono_Star : "Lang (a) \<subseteq> Lang (b) \<Longrightarrow> Lang (Star a) \<subseteq> Lang (Star b)"
by(auto)(metis in_star_iff_concat order.trans)
lemma mono_Star' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (Star a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b)"
by(auto)(metis in_star_iff_concat order.trans)
lemma mono_rep1_star:"Lang (a) \<subseteq> Lang (b) \<Longrightarrow> Lang (rep1 a) \<subseteq> Lang (Star b)"
using mono_Star rep1_star_incl by blast
lemma mono_rep1_star':"L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (rep1 a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b)"
using mono_Star' rep1_star_incl' by blast
text\<open>Not a terribly deep theorem, but an interesting property of consistency between
ontologies - so, a lemma that shouldn't break if the involved ontologies were changed.
@@ -214,7 +106,6 @@ theorem articles_sub_reports: \<open>(Lang article_monitor) \<subseteq> Lang rep
text\<open>The proof proceeds by blindly applying the monotonicity rules
on the language of regular expressions.\<close>
print_doc_classes
text\<open>All Class-Id's --- should be generated.\<close>
lemmas class_ids =

View File

@@ -36,9 +36,12 @@ theory Isa_DOF (* Isabelle Document Ontology Framework *)
and "open_monitor*" "close_monitor*"
"declare_reference*" "update_instance*"
"doc_class" "onto_class" (* a syntactic alternative *)
"ML*"
"doc_class" "onto_class" (* a syntactic alternative *)
"onto_morphism" :: thy_decl
and "to"
and "ML*"
"define_shortcut*" "define_macro*" :: thy_decl
and "definition*" :: thy_defn
and "theorem*" "lemma*" "corollary*" "proposition*" :: thy_goal_stmt
and "schematic_goal*" :: thy_goal_stmt
@@ -74,6 +77,7 @@ val def_suffixN = "_" ^ defN
val defsN = defN ^ "s"
val instances_of_suffixN = "_instances"
val invariant_suffixN = "_inv"
val monitor_suffixN = "_monitor"
val instance_placeholderN = "\<sigma>"
val makeN = "make"
val schemeN = "_scheme"
@@ -2359,6 +2363,7 @@ fun onto_macro_cmd_command (name, pos) descr cmd output_cmd =
))))
(* Core Command Definitions *)
val _ =
@@ -2691,7 +2696,9 @@ fun gen_theorem schematic bundle_includes prep_att prep_stmt
|> prep_statement (prep_att lthy) prep_stmt elems raw_concl;
val atts = more_atts @ map (prep_att lthy) raw_atts;
val print_cfg = {interactive = int, pos = Position.thread_data (), proof_state= false}
val pos = Position.thread_data ();
val print_results =
Proof_Display.print_results {interactive = int, pos = pos, proof_state = false};
fun after_qed' results goal_ctxt' =
let
@@ -2703,13 +2710,13 @@ fun gen_theorem schematic bundle_includes prep_att prep_stmt
Local_Theory.notes_kind kind
(map2 (fn (b, _) => fn ths => (b, [(ths, [])])) stmt results') lthy;
val lthy'' =
if Binding.is_empty_atts (name, atts) then
(Proof_Display.print_results print_cfg lthy' ((kind, ""), res); lthy')
if Binding.is_empty_atts (name, atts)
then (print_results lthy' ((kind, ""), res); lthy')
else
let
val ([(res_name, _)], lthy'') =
Local_Theory.notes_kind kind [((name, atts), [(maps #2 res, [])])] lthy';
val _ = Proof_Display.print_results print_cfg lthy' ((kind, res_name), res);
val _ = print_results lthy' ((kind, res_name), res);
in lthy'' end;
in after_qed results' lthy'' end;
@@ -3082,6 +3089,18 @@ fun define_inv (bind, inv) thy =
|> Syntax.check_term (Proof_Context.init_global thy)
in thy |> Named_Target.theory_map (define_cond bind eq) end
fun define_monitor_const doc_class_name bind thy =
let val bname = Long_Name.base_name doc_class_name
val rex = DOF_core.rex_of doc_class_name thy
val monitor_binding = bind |> (Binding.qualify false bname
#> Binding.suffix_name monitor_suffixN)
in
if can hd rex
then
let val eq = Logic.mk_equals (Free(Binding.name_of monitor_binding, doc_class_rexp_T), hd rex)
in thy |> Named_Target.theory_map (define_cond monitor_binding eq) end
else thy
end
fun add_doc_class_cmd overloaded (raw_params, binding)
raw_parent raw_fieldsNdefaults reject_Atoms regexps invariants thy =
@@ -3152,9 +3171,10 @@ fun add_doc_class_cmd overloaded (raw_params, binding)
| SOME _ => if (not o null) record_fields
then add record_fields invariants' {virtual=false}
else add [DOF_core.tag_attr] invariants' {virtual=true})
|> (fn thy => OntoLinkParser.docitem_antiquotation binding (cid thy) thy)
(* defines the ontology-checked text antiquotation to this document class *)
|> (fn thy => OntoLinkParser.docitem_antiquotation binding (cid thy) thy)
|> (fn thy => fold define_inv (invariants') thy)
|> (fn thy => define_monitor_const (cid thy) binding thy)
(* The function declare_ISA_class_accessor_and_check_instance uses a prefix
because the class name is already bound to "doc_class Regular_Exp.rexp" constant
by add_doc_class_cmd function *)
@@ -3194,6 +3214,76 @@ val _ =
val clean_mixfix_sub = translate_string
(fn "\<^sub>_" => "_"
| "\<^sub>'" => "'"
| c => c);
val prefix_sub = prefix "\<^sub>"
val convertN = "convert"
fun add_onto_morphism classes_mappings eqs thy =
if (length classes_mappings = length eqs) then
let
val specs = map (fn x => (Binding.empty_atts, x)) eqs
val converts =
map (fn (oclasses, dclass) =>
let
val oclasses_string = map YXML.content_of oclasses
val dclass_string = YXML.content_of dclass
val const_sub_name = dclass_string
|> (oclasses_string |> fold_rev (fn x => fn y => x ^ "_" ^ y))
|> String.explode |> map (fn x => "\<^sub>" ^ (String.str x)) |> String.concat
val convert_typ = oclasses_string |> rev |> hd
|> (oclasses_string |> rev |> tl |> fold (fn x => fn y => x ^ " \<times> " ^ y))
val convert_typ' = convert_typ ^ " \<Rightarrow> " ^ dclass_string
val oclasses_sub_string = oclasses_string
|> map (clean_mixfix_sub
#> String.explode
#> map (prefix_sub o String.str)
#> String.concat)
val mixfix = oclasses_sub_string |> rev |> hd
|> (oclasses_sub_string |> rev |> tl |> fold (fn x => fn y => x ^ "\<^sub>\<times>" ^ y))
|> ISA_core.clean_mixfix
val mixfix' = convertN ^ mixfix ^ "\<^sub>\<Rightarrow>"
^ (dclass_string |> clean_mixfix_sub |> String.explode
|> map (prefix_sub o String.str) |> String.concat)
in SOME (Binding.name (convertN ^ const_sub_name), SOME convert_typ', Mixfix.mixfix mixfix') end)
classes_mappings
val args = map (fn (x, y) => (x, y, [], [])) (converts ~~ specs)
val lthy = Named_Target.theory_init thy
val updated_lthy = fold (fn (decl, spec, prems, params) => fn lthy =>
let
val (_, lthy') = Specification.definition_cmd decl params prems spec true lthy
in lthy' end) args lthy
in Local_Theory.exit_global updated_lthy end
(* alternative way to update the theory using the Theory.join_theory function *)
(*val lthys = map (fn (decl, spec, prems, params) =>
let
val (_, lthy') = Specification.definition_cmd decl params prems spec true lthy
in lthy' end) args
val thys = map (Local_Theory.exit_global) lthys
in Theory.join_theory thys end*)
else error("The number of morphism declarations does not match the number of definitions")
fun add_onto_morphism' (classes_mappings, eqs) = add_onto_morphism classes_mappings eqs
val parse_onto_morphism = Parse.and_list
((Parse.$$$ "(" |-- Parse.enum1 "," Parse.typ --| Parse.$$$ ")" --| \<^keyword>\<open>to\<close>)
-- Parse.typ)
-- (\<^keyword>\<open>where\<close> |-- Parse.and_list Parse.prop)
(* The name of the definitions must follow this rule:
for the declaration "onto_morphism (AA, BB) to CC",
the name of the constant must be "convert\<^sub>A\<^sub>A\<^sub>\<times>\<^sub>B\<^sub>B\<^sub>\<Rightarrow>\<^sub>C\<^sub>C". *)
val _ =
Outer_Syntax.command \<^command_keyword>\<open>onto_morphism\<close> "define ontology morphism"
(parse_onto_morphism >> (Toplevel.theory o add_onto_morphism'));
end (* struct *)
\<close>

View File

@@ -236,6 +236,72 @@ end; (* local *)
end (* struct *)
\<close>
lemma regexp_sub : "a \<le> b \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (\<lfloor>a\<rfloor>) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (\<lfloor>b\<rfloor>)"
using dual_order.trans by auto
lemma regexp_seq_mono:
"Lang(a) \<subseteq> Lang (a') \<Longrightarrow> Lang(b) \<subseteq> Lang (b') \<Longrightarrow> Lang(a ~~ b) \<subseteq> Lang(a' ~~ b')" by auto
lemma regexp_seq_mono':
"L\<^sub>s\<^sub>u\<^sub>b(a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (a') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(a ~~ b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b(a' ~~ b')" by auto
lemma regexp_alt_mono :"Lang(a) \<subseteq> Lang (a') \<Longrightarrow> Lang(a || b) \<subseteq> Lang(a' || b)" by auto
lemma regexp_alt_mono' :"L\<^sub>s\<^sub>u\<^sub>b(a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (a') \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b(a || b) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b(a' || b)" by auto
lemma regexp_alt_commute : "Lang(a || b) = Lang(b || a)" by auto
lemma regexp_alt_commute' : "L\<^sub>s\<^sub>u\<^sub>b(a || b) = L\<^sub>s\<^sub>u\<^sub>b(b || a)" by auto
lemma regexp_unit_right : "Lang (a) = Lang (a ~~ One) " by simp
lemma regexp_unit_right' : "L\<^sub>s\<^sub>u\<^sub>b (a) = L\<^sub>s\<^sub>u\<^sub>b (a ~~ One) " by simp
lemma regexp_unit_left : "Lang (a) = Lang (One ~~ a) " by simp
lemma regexp_unit_left' : "L\<^sub>s\<^sub>u\<^sub>b (a) = L\<^sub>s\<^sub>u\<^sub>b (One ~~ a) " by simp
lemma opt_star_incl :"Lang (opt a) \<subseteq> Lang (Star a)" by (simp add: opt_def subset_iff)
lemma opt_star_incl':"L\<^sub>s\<^sub>u\<^sub>b (opt a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star a)" by (simp add: opt_def subset_iff)
lemma rep1_star_incl:"Lang (rep1 a) \<subseteq> Lang (Star a)"
unfolding rep1_def by(subst L_Star, subst L_Conc)(force)
lemma rep1_star_incl':"L\<^sub>s\<^sub>u\<^sub>b (rep1 a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star a)"
unfolding rep1_def by(subst L\<^sub>s\<^sub>u\<^sub>b_Star, subst L\<^sub>s\<^sub>u\<^sub>b_Conc)(force)
lemma cancel_rep1 : "Lang (a) \<subseteq> Lang (rep1 a)"
unfolding rep1_def by auto
lemma cancel_rep1' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (rep1 a)"
unfolding rep1_def by auto
lemma seq_cancel_opt : "Lang (a) \<subseteq> Lang (c) \<Longrightarrow> Lang (a) \<subseteq> Lang (opt b ~~ c)"
by(subst regexp_unit_left, rule regexp_seq_mono)(simp_all add: opt_def)
lemma seq_cancel_opt' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (c) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (opt b ~~ c)"
by(subst regexp_unit_left', rule regexp_seq_mono')(simp_all add: opt_def)
lemma seq_cancel_Star : "Lang (a) \<subseteq> Lang (c) \<Longrightarrow> Lang (a) \<subseteq> Lang (Star b ~~ c)"
by auto
lemma seq_cancel_Star' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (c) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b ~~ c)"
by auto
lemma mono_Star : "Lang (a) \<subseteq> Lang (b) \<Longrightarrow> Lang (Star a) \<subseteq> Lang (Star b)"
by(auto)(metis in_star_iff_concat order.trans)
lemma mono_Star' : "L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (Star a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b)"
by(auto)(metis in_star_iff_concat order.trans)
lemma mono_rep1_star:"Lang (a) \<subseteq> Lang (b) \<Longrightarrow> Lang (rep1 a) \<subseteq> Lang (Star b)"
using mono_Star rep1_star_incl by blast
lemma mono_rep1_star':"L\<^sub>s\<^sub>u\<^sub>b (a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (b) \<Longrightarrow> L\<^sub>s\<^sub>u\<^sub>b (rep1 a) \<subseteq> L\<^sub>s\<^sub>u\<^sub>b (Star b)"
using mono_Star' rep1_star_incl' by blast
no_notation Star ("\<lbrace>(_)\<rbrace>\<^sup>*" [0]100)
no_notation Plus (infixr "||" 55)
no_notation Times (infixr "~~" 60)

View File

@@ -13,7 +13,7 @@
(*<*)
theory "Isabelle_DOF_Manual"
imports "M_05_Implementation"
imports "M_07_Implementation"
begin
close_monitor*[this]
check_doc_global

View File

@@ -0,0 +1,227 @@
(*************************************************************************
* Copyright (C)
* 2019-2022 University of Exeter
* 2018-2022 University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<*)
theory
"M_04_Document_Ontology"
imports
"M_03_GuidedTour"
keywords "class_synonym" :: thy_defn
begin
(*>*)
(*<*)
definition combinator1 :: "'a \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'b" (infixl "|>" 65)
where "x |> f = f x"
ML\<open>
local
val _ =
Outer_Syntax.local_theory \<^command_keyword>\<open>class_synonym\<close> "declare type abbreviation"
(Parse.type_args -- Parse.binding --
(\<^keyword>\<open>=\<close> |-- Parse.!!! (Parse.typ -- Parse.opt_mixfix'))
>> (fn ((args, a), (rhs, mx)) => snd o Typedecl.abbrev_cmd (a, args, mx) rhs));
in end
\<close>
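(* Hypothetical usage sketch, not part of the original change set: the command declared
   above is assumed to behave like type_synonym; the abbreviation name is made up here. *)
class_synonym name_list = "string list"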
(*>*)
(*<*)
doc_class "title" = short_title :: "string option" <= "None"
doc_class elsevier =
organization :: string
address_line :: string
postcode :: int
city :: string
(*doc_class elsevier_affiliation = affiliation +*)
doc_class acm =
position :: string
institution :: string
department :: int
street_address :: string
city :: string
state :: int
country :: string
postcode :: int
(*doc_class acm_affiliation = affiliation +*)
doc_class lncs =
institution :: string
(*doc_class lncs_affiliation = affiliation +*)
doc_class author =
name :: string
email :: "string" <= "''''"
invariant ne_name :: "name \<sigma> \<noteq> ''''"
doc_class elsevier_author = "author" +
affiliations :: "elsevier list"
short_author :: string
url :: string
footnote :: string
text*[el1:: "elsevier"]\<open>\<close>
(*text*[el_aff1:: "affiliation", journal_style = "@{elsevier \<open>el1\<close>}"]\<open>\<close>*)
term*\<open>@{elsevier \<open>el1\<close>}\<close>
text*[el_auth1:: "elsevier_author", affiliations = "[@{elsevier \<open>el1\<close>}]"]\<open>\<close>
doc_class acm_author = "author" +
affiliations :: "acm list"
orcid :: int
footnote :: string
text*[acm1:: "acm"]\<open>\<close>
(*text*[acm_aff1:: "acm affiliation", journal_style = "@{acm \<open>acm1\<close>}"]\<open>\<close>*)
text*[acm_auth1:: "acm_author", affiliations = "[@{acm \<open>acm1\<close>}]"]\<open>\<close>
doc_class lncs_author = "author" +
affiliations :: "lncs list"
orcid :: int
short_author :: string
footnote :: string
text*[lncs1:: "lncs"]\<open>\<close>
(*text*[lncs_aff1:: "lncs affiliation", journal_style = "@{lncs \<open>lncs1\<close>}"]\<open>\<close>*)
text*[lncs_auth1:: "lncs_author", affiliations = "[@{lncs \<open>lncs1\<close>}]"]\<open>\<close>
doc_class "text_element" =
authored_by :: "author set" <= "{}"
level :: "int option" <= "None"
invariant authors_req :: "authored_by \<sigma> \<noteq> {}"
and level_req :: "the (level \<sigma>) > 1"
doc_class "introduction" = "text_element" +
authored_by :: "(author) set" <= "UNIV"
doc_class "technical" = "text_element" +
formal_results :: "thm list"
doc_class "definition" = "technical" +
is_formal :: "bool"
doc_class "theorem" = "technical" +
is_formal :: "bool"
assumptions :: "term list" <= "[]"
statement :: "term option" <= "None"
doc_class "conclusion" = "text_element" +
resumee :: "(definition set \<times> theorem set)"
invariant is_form :: "(\<exists>x\<in>(fst (resumee \<sigma>)). definition.is_formal x) \<longrightarrow>
(\<exists>y\<in>(snd (resumee \<sigma>)). is_formal y)"
text*[def::"definition", is_formal = "True"]\<open>\<close>
text*[theo::"theorem", is_formal = "False"]\<open>\<close>
text*[conc::"conclusion", resumee="({@{definition \<open>def\<close>}}, {@{theorem \<open>theo\<close>}})"]\<open>\<close>
value*\<open>resumee @{conclusion \<open>conc\<close>} |> fst\<close>
value*\<open>resumee @{conclusion \<open>conc\<close>} |> snd\<close>
doc_class "article" =
style_id :: string <= "''LNCS''"
accepts "(title ~~ \<lbrace>author\<rbrace>\<^sup>+ ~~ \<lbrace>introduction\<rbrace>\<^sup>+
~~ \<lbrace>\<lbrace>definition ~~ example\<rbrace>\<^sup>+ || theorem\<rbrace>\<^sup>+ ~~ \<lbrace>conclusion\<rbrace>\<^sup>+)"
datatype kind = expert_opinion | argument | "proof"
onto_class result = " technical" +
evidence :: kind
property :: " theorem list" <= "[]"
invariant has_property :: "evidence \<sigma> = proof \<longleftrightarrow> property \<sigma> \<noteq> []"
(*>*)
text*[paper_m::float, main_caption="\<open>A Basic Document Ontology: paper$^m$\<close>"]\<open>
@{boxed_theory_text [display,indent=5]
\<open>doc_class "title" = short_title :: "string option" <= "None"
doc_class affiliation =
journal_style :: '\<alpha>
doc_class author =
affiliations :: "'\<alpha> affiliation list"
name :: string
email :: "string" <= "''''"
invariant ne_name :: "name \<sigma> \<noteq> ''''"
doc_class "text_element" =
authored_by :: "('\<alpha> author) set" <= "{}"
level :: "int option" <= "None"
invariant authors_req :: "authored_by \<noteq> {}"
and level_req :: "the (level) > 1"
doc_class "introduction" = text_element +
authored_by :: "('\<alpha> author) set" <= "UNIV"
doc_class "technical" = text_element +
formal_results :: "thm list"
doc_class "definition" = technical +
is_formal :: "bool"
doc_class "theorem" = technical +
assumptions :: "term list" <= "[]"
statement :: "term option" <= "None"
doc_class "conclusion" = text_element +
resumee :: "(definition set \<times> theorem set)"
invariant (\<forall>x\<in>fst resumee. is_formal x)
\<longrightarrow> (\<exists>y\<in>snd resumee. is_formal y)
doc_class "article" =
style_id :: string <= "''LNCS''"
accepts "(title ~~ \<lbrace>author\<rbrace>\<^sup>+ ~~ \<lbrace>introduction\<rbrace>\<^sup>+
~~ \<lbrace>\<lbrace>definition ~~ example\<rbrace>\<^sup>+ || theorem\<rbrace>\<^sup>+ ~~ \<lbrace>conclusion\<rbrace>\<^sup>+)"\<close>}
\<close>
(*<*)
datatype role = PM \<comment> \<open>Program Manager\<close>
| RQM \<comment> \<open>Requirements Manager\<close>
| DES \<comment> \<open>Designer\<close>
| IMP \<comment> \<open>Implementer\<close>
| ASR \<comment> \<open>Assessor\<close>
| INT \<comment> \<open>Integrator\<close>
| TST \<comment> \<open>Tester\<close>
| VER \<comment> \<open>Verifier\<close>
| VnV \<comment> \<open>Verification and Validation\<close>
| VAL \<comment> \<open>Validator\<close>
abbreviation developer where "developer == DES"
abbreviation validator where "validator == VAL"
abbreviation verifier where "verifier == VER"
doc_class requirement = Isa_COL.text_element +
long_name :: "string option"
is_concerned :: "role set"
text*[req1::requirement,
is_concerned="{developer, validator}"]
\<open>The operating system shall provide secure
memory separation.\<close>
text\<open>
The recurring issue of the certification
is @{requirement \<open>req1\<close>} ...\<close>
term "\<lparr>long_name = None,is_concerned = {developer,validator}\<rparr>"
(*>*)
(*<*)
end
(*>*)

View File

@@ -0,0 +1,457 @@
(*************************************************************************
* Copyright (C)
* 2019-2022 University of Exeter
* 2018-2022 University of Paris-Saclay
* 2018 The University of Sheffield
*
* License:
* This program can be redistributed and/or modified under the terms
* of the 2-clause BSD-style license.
*
* SPDX-License-Identifier: BSD-2-Clause
*************************************************************************)
(*<*)
theory
"M_05_Proofs_Ontologies"
imports
"M_04_Document_Ontology"
begin
(*>*)
chapter*[onto_proofs::technical]\<open>Proofs over Ontologies\<close>
text\<open>It is a distinguishing feature of \<^dof> that it does not directly generate meta-data but
rather a \<^emph>\<open>theory of meta-data\<close> that can be used in HOL-terms on various levels
of the Isabelle system and its document generation technology. Meta-data theories can be
converted into executable code and used efficiently in validations, but they can also serve for
theoretical reasoning over given ontologies. While the full potential of this latter possibility
still needs to be explored, we present in the following sections two applications:
\<^enum> Verified ontological morphisms, also called \<^emph>\<open>ontological mappings\<close> in the literature
\<^cite>\<open>"books/daglib/0032976" and "atl" and "BGPP95"\<close>, \<^ie> proofs of invariant preservation
along translation-functions of all instances of \<^verbatim>\<open>doc_class\<close>-es.
\<^enum> Verified refinement relations between the structural descriptions of theory documents,
\<^ie> proofs of language inclusions of monitors of global ontology classes.
\<close>
section*["morphisms"::scholarly_paper.text_section] \<open>Proving Properties over Ontologies\<close>
subsection\<open>Ontology-Morphisms: a Prototypical Example\<close>
text\<open>We define a small ontology with the following classes:\<close>
doc_class AA = aa :: nat
doc_class BB = bb :: int
doc_class CC = cc :: int
doc_class DD = dd :: int
doc_class EE = ee :: int
doc_class FF = ff :: int
onto_morphism (AA, BB) to CC and (DD, EE) to FF
where "convert\<^sub>A\<^sub>A\<^sub>\<times>\<^sub>B\<^sub>B\<^sub>\<Rightarrow>\<^sub>C\<^sub>C \<sigma> = \<lparr> CC.tag_attribute = 1::int,
CC.cc = int(aa (fst \<sigma>)) + bb (snd \<sigma>)\<rparr>"
and "convert\<^sub>D\<^sub>D\<^sub>\<times>\<^sub>E\<^sub>E\<^sub>\<Rightarrow>\<^sub>F\<^sub>F \<sigma> = \<lparr> FF.tag_attribute = 1::int,
FF.ff = dd (fst \<sigma>) + ee (snd \<sigma>) \<rparr>"
text\<open>Note that the \<^term>\<open>convert\<^sub>A\<^sub>A\<^sub>\<times>\<^sub>B\<^sub>B\<^sub>\<Rightarrow>\<^sub>C\<^sub>C\<close>-morphism involves a data-format conversion, and that the
resulting transformation of @{doc_class AA}-instances and @{doc_class BB}-instances is surjective
but not injective. The \<^term>\<open>CC.tag_attribute\<close> is used to potentially differentiate instances with
equal attribute-content and is irrelevant here.\<close>
(*<*) (* Just a test, irrelevant for the document.*)
doc_class A_A = aa :: nat
doc_class BB' = bb :: int
onto_morphism (A_A, BB', CC, DD, EE) to FF
where "convert\<^sub>A\<^sub>_\<^sub>A\<^sub>\<times>\<^sub>B\<^sub>B\<^sub>'\<^sub>\<times>\<^sub>C\<^sub>C\<^sub>\<times>\<^sub>D\<^sub>D\<^sub>\<times>\<^sub>E\<^sub>E\<^sub>\<Rightarrow>\<^sub>F\<^sub>F \<sigma> = \<lparr> FF.tag_attribute = 1::int,
FF.ff = int(aa (fst \<sigma>)) + bb (fst (snd \<sigma>))\<rparr>"
(*>*)
text\<open>This specification construct introduces the following constants and definitions:
\<^item> @{term [source] \<open>convert\<^sub>A\<^sub>A\<^sub>_\<^sub>B\<^sub>B\<^sub>_\<^sub>C\<^sub>C :: AA \<times> BB \<Rightarrow> CC\<close>}
\<^item> @{term [source] \<open>convert\<^sub>D\<^sub>D\<^sub>_\<^sub>E\<^sub>E\<^sub>_\<^sub>F\<^sub>F :: DD \<times> EE \<Rightarrow> FF\<close>}
% @{term [source] \<open>convert\<^sub>A\<^sub>_\<^sub>A\<^sub>\<times>\<^sub>B\<^sub>B\<^sub>'\<^sub>\<times>\<^sub>C\<^sub>C\<^sub>\<times>\<^sub>D\<^sub>D\<^sub>\<times>\<^sub>E\<^sub>E\<^sub>\<Rightarrow>\<^sub>F\<^sub>F :: A_A \<times> BB' \<times> CC \<times> DD \<times> EE \<Rightarrow> FF\<close>}
and corresponding definitions. \<close>
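(* Hedged sketch, not part of the original change set: the generated constants and their
   definitions can be inspected interactively, e.g. via *)
find_theorems name: "convert"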
subsection\<open>Proving the Preservation of Ontological Mappings: A Document-Ontology Morphism\<close>
text\<open>\<^dof> as a system is currently geared in particular towards \<^emph>\<open>document\<close>-ontologies,
notably for documentation generated from Isabelle theories. We have meanwhile used it for the
generation of various conference and journal papers, notably using the
\<^theory>\<open>Isabelle_DOF.scholarly_paper\<close> and \<^theory>\<open>Isabelle_DOF.technical_report\<close>-ontologies,
targeting a (small) variety of \<^LaTeX> style-files. A particular aspect of these ontologies,
especially when targeting journals from publishers such as ACM, Springer or Elsevier, is the
description of publication meta-data. Publishers tend to have their own styles as to what kind of
meta-data should be associated with a journal publication; thus, the depth and
precise format of affiliations, institutions, their relation to authors, and author descriptions
(with photos or without, hair left-combed or right-combed, etc.) varies.
In the following, we present an attempt at a generalized ontology together with several ontology
mappings to more specialized ones, such as concrete journals and/or the
\<^theory>\<open>Isabelle_DOF.scholarly_paper\<close>-ontology which we mostly used for our own publications.
\<close>
doc_class elsevier_org =
organization :: string
address_line :: string
postcode :: int
city :: string
doc_class acm_org =
position :: string
institution :: string
department :: int
street_address :: string
city :: string
state :: int
country :: string
postcode :: int
doc_class lncs_inst =
institution :: string
doc_class author =
name :: string
email :: "string" <= "''''"
invariant ne_fsnames :: "name \<sigma> \<noteq> ''''"
doc_class elsevierAuthor = "author" +
affiliations :: "elsevier_org list"
firstname :: string
surname :: string
short_author :: string
url :: string
footnote :: string
invariant ne_fsnames :: "firstname \<sigma> \<noteq> '''' \<and> surname \<sigma> \<noteq> ''''"
(*<*)
text*[el1:: "elsevier_org"]\<open>An example elsevier-journal affiliation.\<close>
term*\<open>@{elsevier_org \<open>el1\<close>}\<close>
text*[el_auth1:: "elsevierAuthor", affiliations = "[@{elsevier_org \<open>el1\<close>}]"]\<open>\<close>
(*>*)
text\<open>\<close>
doc_class acmAuthor = "author" +
affiliations :: "acm_org list"
firstname :: string
familyname :: string
orcid :: int
footnote :: string
invariant ne_fsnames :: "firstname \<sigma> \<noteq> '''' \<and> familyname \<sigma> \<noteq> ''''"
(*<*)
text*[acm1:: "acm"]\<open>An example acm-style affiliation\<close>
(*>*)
text\<open>\<close>
doc_class lncs_author = "author" +
affiliations :: "lncs list"
orcid :: int
short_author :: string
footnote :: string
(*<*)
text*[lncs1:: "lncs"]\<open>An example lncs-style affiliation\<close>
text*[lncs_auth1:: "lncs_author", affiliations = "[@{lncs \<open>lncs1\<close>}]"]\<open>Another example lncs-style affiliation\<close>
find_theorems elsevier.tag_attribute
(*>*)
text\<open>\<close>
definition acm_name where "acm_name f s = f @ '' '' @ s"
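(* Hedged sanity check, not part of the original change set: acm_name joins first and
   family name with a single space. *)
value \<open>acm_name ''Grace'' ''Hopper''\<close> (* evaluates to ''Grace Hopper'' *)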
fun concatWith :: "string \<Rightarrow> string list \<Rightarrow> string"
where "concatWith str [] = ''''"
|"concatWith str [a] = a"
|"concatWith str (a#R) = a@str@(concatWith str R)"
lemma concatWith_non_mt : "(S\<noteq>[] \<and> (\<exists> s\<in>set S. s\<noteq>'''')) \<longrightarrow> (concatWith sep S) \<noteq> ''''"
proof(induct S)
case Nil
then show ?case by simp
next
case (Cons a S)
then show ?case apply(auto)
using concatWith.elims apply blast
using concatWith.elims apply blast
using list.set_cases by force
qed
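(* Hedged sanity check, not part of the original change set: concatWith interleaves the
   separator between the list elements. *)
value \<open>concatWith '','' [''Ada'', ''Lovelace'']\<close> (* evaluates to ''Ada,Lovelace'' *)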
onto_morphism (acm) to elsevier
where "convert\<^sub>a\<^sub>c\<^sub>m\<^sub>\<Rightarrow>\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r \<sigma> =
\<lparr>elsevier.tag_attribute = acm.tag_attribute \<sigma>,
organization = acm.institution \<sigma>,
address_line = concatWith '','' [acm.street_address \<sigma>, acm.city \<sigma>],
postcode = acm.postcode \<sigma> ,
city = acm.city \<sigma> \<rparr>"
text\<open>Here is a more basic, but equivalent definition for the other way round:\<close>
definition elsevier_to_acm_morphism :: "elsevier_org \<Rightarrow> acm_org"
("_ \<langle>acm\<rangle>\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r" [1000]999)
where "\<sigma> \<langle>acm\<rangle>\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r = \<lparr> acm_org.tag_attribute = 1::int,
acm_org.position = ''no position'',
acm_org.institution = organization \<sigma>,
acm_org.department = 0,
acm_org.street_address = address_line \<sigma>,
acm_org.city = elsevier_org.city \<sigma>,
acm_org.state = 0,
acm_org.country = ''no country'',
acm_org.postcode = elsevier_org.postcode \<sigma> \<rparr>"
text\<open>The following onto-morphism links the types \<^typ>\<open>elsevierAuthor\<close> and \<^typ>\<open>acmAuthor\<close>. Note that
the conversion involves trivial data-conversions (renaming of attributes in the classes),
string-representation conversions, and conversions of second-stage, referenced instances.\<close>
onto_morphism (elsevierAuthor) to acmAuthor
where "convert\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r\<^sub>\<Rightarrow>\<^sub>a\<^sub>c\<^sub>m\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r \<sigma> =
\<lparr>author.tag_attribute = undefined,
name = concatWith '','' [elsevierAuthor.firstname \<sigma>,elsevierAuthor.surname \<sigma>],
email = author.email \<sigma>,
acmAuthor.affiliations = (elsevierAuthor.affiliations \<sigma>)
|> map (\<lambda>x. x \<langle>acm\<rangle>\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r),
firstname = elsevierAuthor.firstname \<sigma>,
familyname = elsevierAuthor.surname \<sigma>,
orcid = 0, \<comment> \<open>cheating! (dummy value)\<close>
footnote = elsevierAuthor.footnote \<sigma>\<rparr>"
lemma elsevier_inv_preserved :
"elsevierAuthor.ne_fsnames_inv \<sigma>
\<Longrightarrow> acmAuthor.ne_fsnames_inv (convert\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r\<^sub>\<Rightarrow>\<^sub>a\<^sub>c\<^sub>m\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r \<sigma>)
\<and> author.ne_fsnames_inv (convert\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r\<^sub>\<Rightarrow>\<^sub>a\<^sub>c\<^sub>m\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r \<sigma>)"
unfolding elsevierAuthor.ne_fsnames_inv_def acmAuthor.ne_fsnames_inv_def
convert\<^sub>e\<^sub>l\<^sub>s\<^sub>e\<^sub>v\<^sub>i\<^sub>e\<^sub>r\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r\<^sub>_\<^sub>a\<^sub>c\<^sub>m\<^sub>A\<^sub>u\<^sub>t\<^sub>h\<^sub>o\<^sub>r_def author.ne_fsnames_inv_def
by auto
text\<open>The proof is, to quote Tony Hoare, ``as simple as it should be''. Note that it involves
lemmas such as @{thm concatWith_non_mt} which in themselves require induction, \<^ie>, which are out of
reach of pure ATP proof-techniques. \<close>
subsection\<open>Proving the Preservation of Ontological Mappings: A Domain-Ontology Morphism\<close>
text\<open>The following example is drawn from a domain-specific scenario: for conventional data-models,
be they represented by UML class diagrams, SQL-like "tables", or Excel-sheet-like presentations
of uniform data, we can conceive an ontology (which is equivalent here to a conventional style-sheet)
and annotate textual raw data. This example describes how meta-data can be used to
calculate and transform this kind of representation in a type-safe and verified way. \<close>
(*<*)
(* Mapped_PILIB_Ontology example *)
(* rethink examples: should we "morph" providence too ? ? ? Why not ? bu *)
term\<open>fold (+) S 0\<close>
definition sum
where "sum S = (fold (+) S 0)"
(*>*)
text\<open>We model some basic enumerations as inductive data-types: \<close>
datatype Hardware_Type =
Motherboard | Expansion_Card | Storage_Device | Fixed_Media |
Removable_Media | Input_Device | Output_Device
datatype Software_Type =
Operating_system | Text_editor | Web_Navigator | Development_Environment
text\<open>In the sequel, we model a ``Reference Ontology'', \<^ie> a data structure in which we assume
that standards or some de-facto standard data-base refer to data in the domain of electronic
devices:\<close>
onto_class Resource =
name :: string
onto_class Electronic = Resource +
provider :: string
manufacturer :: string
onto_class Component = Electronic +
mass :: int
onto_class Simulation_Model = Electronic +
simulate :: Component
composed_of :: "Component list"
version :: int
onto_class Informatic = Resource +
description :: string
onto_class Hardware = Informatic +
type :: Hardware_Type
mass :: int
composed_of :: "Component list"
invariant c1 :: "mass \<sigma> = sum(map Component.mass (composed_of \<sigma>))"
onto_class Software = Informatic +
type :: Software_Type
version :: int
text\<open>Finally, we present a \<^emph>\<open>local ontology\<close>, \<^ie> a data structure used by a local store
in the data-base of its cash-system:\<close>
onto_class Item =
name :: string
onto_class Product = Item +
serial_number :: int
provider :: string
mass :: int
onto_class Electronic_Component = Product +
serial_number :: int
onto_class Monitor = Product +
composed_of :: "Electronic_Component list"
invariant c2 :: "Product.mass \<sigma> = sum(map Product.mass (composed_of \<sigma>))"
term\<open>Product.mass \<sigma> = sum(map Product.mass (composed_of \<sigma>))\<close>
onto_class Computer_Software = Item +
type :: Software_Type
version :: int
text\<open>These two example ontologies are linked via conversion functions called \<^emph>\<open>morphisms\<close>.
The crucial point is that we can prove, for the morphisms connecting these ontologies, that the
conversions are guaranteed to preserve the data-invariants, although the data-structures (and, of
course, their presentation) are very different. Besides, morphism functions can be ``forgetful''
(\<^ie> surjective), ``embedding'' (\<^ie> injective) or even ``one-to-one'' (\<^ie> bijective).\<close>
definition Item_to_Resource_morphism :: "Item \<Rightarrow> Resource"
("_ \<langle>Resource\<rangle>\<^sub>I\<^sub>t\<^sub>e\<^sub>m" [1000]999)
where " \<sigma> \<langle>Resource\<rangle>\<^sub>I\<^sub>t\<^sub>e\<^sub>m =
\<lparr> Resource.tag_attribute = 1::int ,
Resource.name = name \<sigma> \<rparr>"
definition Product_to_Resource_morphism :: "Product \<Rightarrow> Resource"
("_ \<langle>Resource\<rangle>\<^sub>P\<^sub>r\<^sub>o\<^sub>d\<^sub>u\<^sub>c\<^sub>t" [1000]999)
where " \<sigma> \<langle>Resource\<rangle>\<^sub>P\<^sub>r\<^sub>o\<^sub>d\<^sub>u\<^sub>c\<^sub>t =
\<lparr> Resource.tag_attribute = 2::int ,
Resource.name = name \<sigma> \<rparr>"
definition Computer_Software_to_Software_morphism :: "Computer_Software \<Rightarrow> Software"
("_ \<langle>Software\<rangle>\<^sub>S\<^sub>o\<^sub>f\<^sub>t\<^sub>C\<^sub>m\<^sub>p" [1000]999)
where "\<sigma> \<langle>Software\<rangle>\<^sub>S\<^sub>o\<^sub>f\<^sub>t\<^sub>C\<^sub>m\<^sub>p =
\<lparr> Resource.tag_attribute = 3::int ,
Resource.name = name \<sigma> ,
Informatic.description = ''no description'',
Software.type = type \<sigma> ,
Software.version = version \<sigma> \<rparr>"
definition Electronic_Component_to_Component_morphism :: "Electronic_Component \<Rightarrow> Component"
("_ \<langle>Component\<rangle>\<^sub>E\<^sub>l\<^sub>e\<^sub>c\<^sub>C\<^sub>m\<^sub>p" [1000]999)
where "\<sigma> \<langle>Component\<rangle>\<^sub>E\<^sub>l\<^sub>e\<^sub>c\<^sub>C\<^sub>m\<^sub>p =
\<lparr> Resource.tag_attribute = 4::int ,
Resource.name = name \<sigma> ,
Electronic.provider = provider \<sigma> ,
Electronic.manufacturer = ''no manufacturer'' ,
Component.mass = mass \<sigma> \<rparr>"
definition Monitor_to_Hardware_morphism :: "Monitor \<Rightarrow> Hardware"
("_ \<langle>Hardware\<rangle>\<^sub>C\<^sub>o\<^sub>m\<^sub>p\<^sub>u\<^sub>t\<^sub>e\<^sub>r\<^sub>H\<^sub>a\<^sub>r\<^sub>d\<^sub>w\<^sub>a\<^sub>r\<^sub>e" [1000]999)
where "\<sigma> \<langle>Hardware\<rangle>\<^sub>C\<^sub>o\<^sub>m\<^sub>p\<^sub>u\<^sub>t\<^sub>e\<^sub>r\<^sub>H\<^sub>a\<^sub>r\<^sub>d\<^sub>w\<^sub>a\<^sub>r\<^sub>e =
\<lparr> Resource.tag_attribute = 5::int ,
Resource.name = name \<sigma> ,
Informatic.description = ''no description'',
Hardware.type = Output_Device,
Hardware.mass = mass \<sigma> ,
Hardware.composed_of = map Electronic_Component_to_Component_morphism (composed_of \<sigma>)
\<rparr>"
text\<open>On this basis, we can state the following invariant preservation theorems:\<close>
lemma inv_c2_preserved :
"c2_inv \<sigma> \<Longrightarrow> c1_inv (\<sigma> \<langle>Hardware\<rangle>\<^sub>C\<^sub>o\<^sub>m\<^sub>p\<^sub>u\<^sub>t\<^sub>e\<^sub>r\<^sub>H\<^sub>a\<^sub>r\<^sub>d\<^sub>w\<^sub>a\<^sub>r\<^sub>e)"
unfolding c1_inv_def c2_inv_def
Monitor_to_Hardware_morphism_def Electronic_Component_to_Component_morphism_def
by (auto simp: comp_def)
lemma Monitor_to_Hardware_morphism_total :
"Monitor_to_Hardware_morphism ` ({X::Monitor. c2_inv X}) \<subseteq> ({X::Hardware. c1_inv X})"
using inv_c2_preserved
by auto
type_synonym local_ontology = "Item * Electronic_Component * Monitor"
type_synonym reference_ontology = "Resource * Component * Hardware"
fun ontology_mapping :: "local_ontology \<Rightarrow> reference_ontology"
where "ontology_mapping (x, y, z) = (x\<langle>Resource\<rangle>\<^sub>I\<^sub>t\<^sub>e\<^sub>m, y\<langle>Component\<rangle>\<^sub>E\<^sub>l\<^sub>e\<^sub>c\<^sub>C\<^sub>m\<^sub>p, z\<langle>Hardware\<rangle>\<^sub>C\<^sub>o\<^sub>m\<^sub>p\<^sub>u\<^sub>t\<^sub>e\<^sub>r\<^sub>H\<^sub>a\<^sub>r\<^sub>d\<^sub>w\<^sub>a\<^sub>r\<^sub>e)"
lemma ontology_mapping_total :
"ontology_mapping ` {X. c2_inv (snd (snd X))} \<subseteq> {X. c1_inv (snd (snd X))}"
using inv_c2_preserved
by auto
text\<open>Note that, in contrast to conventional data-translations, the preservation of a class-invariant
is not just established by validating the result; it is proven once and for all, for all instances
of the classes.\<close>
subsection\<open>Proving Monitor-Refinements\<close>
(*<*)
(* switch on regexp syntax *)
notation Star ("\<lbrace>(_)\<rbrace>\<^sup>*" [0]100)
notation Plus (infixr "||" 55)
notation Times (infixr "~~" 60)
notation Atom ("\<lfloor>_\<rfloor>" 65)
(*>*)
text\<open>Monitors are regular expressions that allow for specifying in which order instances of classes
may appear in a document; they are used to specify some structural aspects of a document.
The implementation is based on an AFP theory by Tobias Nipkow on Functional Automata
(\<^ie> a characterization of regular automata using functional, polymorphic descriptions of transition
functions that avoids the ad-hoc finitizations commonly used in automata theory), which
also comprises functions to generate executable deterministic and non-deterministic automata.
This theory is compiled to SML code that has been integrated into the \<^dof> system. The necessary
adaptations of this integration can be found in the theory \<^theory>\<open>Isabelle_DOF.RegExpInterface\<close>,
which also contains the basic definitions and theorems for the concepts used here.
Recall that the monitor of \<^term>\<open>scholarly_paper.article\<close> is defined by: \<^vs>\<open>0.5cm\<close>
@{thm [indent=20, margin=70, names_short] scholarly_paper.article_monitor_def}
\<^vs>\<open>0.5cm\<close> However, it is possible to reason over the language of monitors and to prove classical
refinement notions such as trace-refinement on the monitor level, and thus once and for all for all
instances of validated documents conforming to a particular ontology. The primitive recursive
operators \<^term>\<open>RegExpInterface.Lang\<close> and \<^term>\<open>RegExpInterface.L\<^sub>s\<^sub>u\<^sub>b\<close> compute the languages of
regular expressions, where \<^term>\<open>L\<^sub>s\<^sub>u\<^sub>b\<close> takes the sub-ordering relation of classes into
account.
The proof of: \<^vs>\<open>0.5cm\<close>
@{thm [indent=20,margin=70,names_short] articles_sub_reports}
\<^vs>\<open>0.5cm\<close> can be found in theory \<^theory>\<open>Isabelle_DOF.technical_report\<close>;
it is, again, "as simple as it should be" (to cite Tony Hoare).
The proof of: \<^vs>\<open>0.5cm\<close>
@{thm [indent=20,margin=70,names_short] articles_Lsub_reports}
\<^vs>\<open>0.5cm\<close> is slightly more involved; this is due to the fact that \<^dof> does not automatically
generate a proof of the acyclicity of the graph of the class-hierarchy @{term doc_class_rel}. For a
given hierarchy, this proof will always succeed (since \<^dof> checks this on the meta-level, of course),
which permits us to deduce the anti-symmetry of the transitive closure of @{term doc_class_rel}
and therefore to establish that the doc-classes can be organized in an order (\<^ie> the
type \<^typ>\<open>doc_class\<close> is an instance of the type-class \<^class>\<open>order\<close>). On this basis, the proof
of the above language refinement is quasi-automatic. This proof is also part of
\<^theory>\<open>Isabelle_DOF.technical_report\<close>.
\<close>
(*<*)
(* switch off regexp syntax *)
no_notation Star ("\<lbrace>(_)\<rbrace>\<^sup>*" [0]100)
no_notation Plus (infixr "||" 55)
no_notation Times (infixr "~~" 60)
no_notation Atom ("\<lfloor>_\<rfloor>" 65)
end
(*>*)

View File

@@ -13,9 +13,9 @@
(*<*)
theory
"M_04_RefMan"
"M_06_RefMan"
imports
"M_03_GuidedTour"
"M_05_Proofs_Ontologies"
begin
declare_reference*[infrastructure::technical]
@@ -30,7 +30,7 @@ text\<open>
representation, and give hints for the development of new document templates.
\<^isadof> is embedded in the underlying generic document model of Isabelle as described in
\<^introduction>\<open>dof\<close>. Recall that the document language can be extended dynamically, \<^ie>, new
@{scholarly_paper.introduction \<open>dof\<close>}. Recall that the document language can be extended dynamically, \<^ie>, new
\<^emph>\<open>user-defined\<close> can be introduced at run-time. This is similar to the definition of new functions
in an interpreter. \<^isadof> as a system plugin provides a number of new command definitions in
Isabelle's document model.
@@ -451,7 +451,7 @@ text*[cc_assumption_test_ref::cc_assumption_test]\<open>\<close>
definition tag_l :: "'a \<Rightarrow> 'b \<Rightarrow> 'b" where "tag_l \<equiv> \<lambda>x y. y"
lemma* tagged : "tag_l @{cc_assumption_test \<open>cc_assumption_test_ref\<close>} AA \<Longrightarrow> AA"
lemma* tagged : "tag_l @{cc_assumption_test \<open>cc_assumption_test_ref\<close>} AAA \<Longrightarrow> AAA"
by (simp add: tag_l_def)
find_theorems name:tagged "(_::cc_assumption_test \<Rightarrow> _ \<Rightarrow> _) _ _ \<Longrightarrow>_"
@@ -495,7 +495,7 @@ The major commands providing term-contexts are\<^footnote>\<open>The meta-argume
Wrt. creation, checking and traceability, these commands are analogous to the ontological text and
code-commands. However, the argument terms may contain term-antiquotations stemming from an
ontology definition. Term-contexts are type-checked and \<^emph>\<open>validated\<close> against
the global context (so: in the term @{term_ [source] \<open>@{author \<open>bu\<close>}\<close>}, \<open>bu\<close>
the global context (so: in the term @{term_ [source] \<open>@{scholarly_paper.author \<open>bu\<close>}\<close>}, \<open>bu\<close>
is indeed a string which refers to a meta-object belonging to the document class \<^typ>\<open>author\<close>,
for example). With the exception of the @{command "term*"}-command, the term-antiquotations
in the other term-contexts are additionally expanded (\<^eg> replaced) by the instance of the class,
@@ -671,7 +671,7 @@ doc_class text_element =
variants :: "String.literal set" <= "{STR ''outline'', STR ''document''}"
\<close>}
As mentioned in @{technical \<open>sss\<close>},
As mentioned in @{scholarly_paper.technical \<open>sss\<close>},
\<^const>\<open>level\<close> defines the section-level (\<^eg>, using a \<^LaTeX>-inspired hierarchy:
from \<^boxed_theory_text>\<open>Some -1\<close> (corresponding to \<^boxed_latex>\<open>\part\<close>) to
\<^boxed_theory_text>\<open>Some 0\<close> (corresponding to \<^boxed_latex>\<open>\chapter\<close>, respectively, \<^boxed_theory_text>\<open>chapter*\<close>)
@@ -811,7 +811,7 @@ during the editing process but is only visible in the integrated source but usua
a \<^typ>\<open>text_element\<close> to a specific \<^typ>\<open>author\<close>. Note that this is possible since \<^isadof> assigns to each
document class also a class-type which is declared in the HOL environment.\<close>
text*[s23::example, main_author = "Some(@{author \<open>bu\<close>})"]\<open>
text*[s23::example, main_author = "Some(@{scholarly_paper.author \<open>bu\<close>})"]\<open>
Recall that concrete authors can be denoted by term-antiquotations generated by \<^isadof>; for example,
this may be for a text fragment like
@{boxed_theory_text [display]
@@ -1370,6 +1370,23 @@ value*\<open>filter (\<lambda>\<sigma>. the (text_section.level \<sigma>) > 1) @
section*[infrastructure::technical]\<open>Technical Infrastructure\<close>
subsection\<open>The Previewer\<close>
figure*[
global_DOF_view::figure
, relative_width="95"
, file_src="''figures/ThisPaperWithPreviewer.png''"
]\<open>A Screenshot while editing this Paper in \<^dof> with Preview.\<close>
text\<open>A screenshot of the editing environment is shown in \<^figure>\<open>global_DOF_view\<close>. It supports
incremental, continuous PDF generation, which improves usability. Currently, the granularity
is restricted to entire theories (which have to be selected in a specific document pane).
The response times cannot (yet) compete
with a Word or Overleaf editor, though, which is mostly due to the checking and
evaluation overhead (the turnaround of this section is about 30 s). However, we believe
that better parallelization and evaluation techniques will decrease this gap substantially for the
most common cases in future versions. \<close>
subsection\<open>Developing Ontologies and their Representation Mappings\<close>
text\<open>
The document core \<^emph>\<open>may\<close>, but \<^emph>\<open>need\<close> not, use Isabelle definitions or proofs for checking the

View File

@@ -12,9 +12,9 @@
*************************************************************************)
(*<*)
theory "M_05_Implementation"
imports "M_04_RefMan"
begin
theory "M_07_Implementation"
imports "M_06_RefMan"
begin
(*>*)