From f2bb4f6e31d24a9ba3c2d02882281fb70123e34c Mon Sep 17 00:00:00 2001 From: bu Date: Mon, 15 Jul 2019 17:12:10 +0200 Subject: [PATCH] Modifications / evolutions for the second TR example. Arrived at page 34. --- .../MyCommentedIsabelle.thy | 212 +- .../TR_my_commented_isabelle/ROOT | 5 +- .../TR_MyCommentedIsabelle.thy | 2013 +++++++++++++++++ .../document/root.bib | 528 +++++ 4 files changed, 2736 insertions(+), 22 deletions(-) create mode 100644 examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy create mode 100644 examples/technical_report/TR_my_commented_isabelle/document/root.bib diff --git a/examples/technical_report/TR_my_commented_isabelle/MyCommentedIsabelle.thy b/examples/technical_report/TR_my_commented_isabelle/MyCommentedIsabelle.thy index 60e2d12..ca889f7 100644 --- a/examples/technical_report/TR_my_commented_isabelle/MyCommentedIsabelle.thy +++ b/examples/technical_report/TR_my_commented_isabelle/MyCommentedIsabelle.thy @@ -8,15 +8,16 @@ begin open_monitor*[this::report] (*>*) -title*[tit::title]\An Account with my Personal, Ecclectic Comments on the Isabelle Architecture\ -subtitle*[stit::subtitle]\Version : Isabelle 2017\ +title*[tit::title]\My Personal, Ecclectic Isabelle Programming Manual\ +subtitle*[stit::subtitle]\Version : Isabelle 2019\ text*[bu::author, email = "''wolff@lri.fr''", - affiliation = "''Universit\\'e Paris-Saclay, Paris, France''"]\Burkhart Wolff\ + affiliation = "\Université Paris-Saclay, LRI, France\"]\Burkhart Wolff\ text*[abs::abstract, - keywordlist="[''LCF-Architecture'',''Isabelle'',''SML'',''PIDE'',''Tactic Programming'']"]\ + keywordlist="[''LCF-Architecture'',''Isabelle'',''SML'',''PIDE'',''Programming Guide'', + ''Tactic Programming'']"]\ While Isabelle is mostly known as part of Isabelle/HOL (an interactive theorem prover), it actually provides a system framework for developing a wide spectrum of applications. A particular strength of the Isabelle framework is the @@ -127,9 +128,11 @@ figure*[fig2::figure, relative_width="100",src="''figures/text-element''"] text\ text-commands, ML- commands (and in principle any other command) can be seen as \<^emph>\quotations\ over the underlying SML environment (similar to OCaml or Haskell). Linking these different sorts of quotations with each other and the underlying SML-envirnment -is supported via \<^emph>\antiquotations\'s. Generally speaking, antiquotations are a programming technique -to specify expressions or patterns in a quotation, parsed in the context of the enclosing expression -or pattern where the quotation is. +is supported via \<^emph>\antiquotations\'s. Generally speaking, antiquotations are a programming +technique to specify expressions or patterns in a quotation, parsed in the context of the +enclosing expression or pattern where the quotation is. Another way to understand this concept: +anti-quotations are "semantic macros" that produce a value for the surrounding context +(ML, text, HOL, whatever) depending on local arguments and the underlying logical context. 
The way an antiquotation is specified depends on the quotation expander: typically a specific character and an identifier, or specific parentheses and a complete expression or pattern.\ @@ -306,8 +309,8 @@ ML\ \ -subsection*[t213::example]\Mechanism 2 : global arbitrary data structure that is attached to the global and - local Isabelle context $\theta$ \ +subsection*[t213::example]\Mechanism 2 : global arbitrary data structure that is attached to + the global and local Isabelle context $\theta$ \ ML \ datatype X = mt @@ -1048,11 +1051,31 @@ function. Thus, the Isar-toplevel supports the generic document model and allows for user-programmed extensions. \ -text\Isabelle \<^verbatim>\.thy\-files were processed by two types of parsers: +text\In the traditional literature, Isabelle \<^verbatim>\.thy\-files were +were said to be processed by processed by two types of parsers: \<^enum> the "outer-syntax" (i.e. the syntax for commands) is processed by a lexer-library and parser combinators built on top, and \<^enum> the "inner-syntax" (i.e. the syntax for @{term \\\} - terms) - with an evolved, eight-layer parsing and pretty-printing process. + with an evolved, eight-layer parsing and pretty-printing process + based on an Early-algorithm. +\ + +text\This picture is less and less true for a number of reasons: +\<^enum> With the advent of \inlineisar+\ ... \+, a mechanism for + \<^emph>\cascade-syntax\ came to the Isabelle platform that introduce a flexible means + to change parsing contexts \<^emph>\and\ parsing technologies. +\<^enum> Inside the term-parser levels, the concept of \<^emph>\cartouche\ can be used + to escape the parser and its underlying parsing technology. +\<^enum> Outside, in the traditional toplevel-parsers, the + \inlineisar+\ ... \+ is becoming more and more enforced + (some years ago, syntax like \term{* ... *}\ was replaced by + syntax \term(\)... (\)\. This makes technical support of cascade syntax + more and more easy. +\<^enum> The Lexer infra-structure is already rather generic; nothing prevents to + add beside the lexer - configurations for ML-Parsers, Toplevel Command Syntax + parsers, mathematical notation parsers for $\lambda$-terms new pillars + of parsing technologies, say, for parsing C or Rust or JavaScript inside + Isabelle. \ @@ -1116,7 +1139,8 @@ and properties; @{ML_structure Markup} provides a number of of such \<^emph>\\Isabelle_DOF\. A markup must be tagged with an id; this is done by the @{ML serial}-function discussed earlier.\ -ML\ +subsection\A simple Example\ +ML\ local val docclassN = "doc_class"; @@ -1143,17 +1167,167 @@ in the Front-End, converts this into markup together with a unique number identi markup, and sends this as a report to the Front-End. 
\ -section\Environment Structured Reporting\ +subsection\A Slightly more Complex Example\ +ML \ + +fun markup_tvar def_name ps (name, id) = + let + fun markup_elem name = (name, (name, []): Markup.T); + val (tvarN, tvar) = markup_elem ((case def_name of SOME name => name | _ => "") ^ "'s nickname is"); + val entity = Markup.entity tvarN name + val def = def_name = NONE + in + tvar :: + (if def then I else cons (Markup.keyword_properties Markup.ML_keyword3)) + (map (fn pos => Markup.properties (Position.entity_properties_of def id pos) entity) ps) + end + +fun report [] _ _ = I + | report ps markup x = + let val ms = markup x + in fold (fn p => fold (fn m => cons ((p, m), "")) ms) ps end +\ + +ML \ +local +val data = \ \Derived from Yakoub's example ;-)\ + + [ (\Frédéric 1er\, \King of Naples\) + , (\Frédéric II\, \King of Sicily\) + , (\Frédéric III\, \the Handsome\) + , (\Frédéric IV\, \of the Empty Pockets\) + , (\Frédéric V\, \King of Denmark–Norway\) + , (\Frédéric VI\, \the Knight\) + , (\Frédéric VII\, \Count of Toggenburg\) + , (\Frédéric VIII\, \Count of Zollern\) + , (\Frédéric IX\, \the Old\) + , (\Frédéric X\, \the Younger\) ] + +val (tab0, markup) = + fold_map (fn (name, msg) => fn reports => + let val id = serial () + val pos = [Input.pos_of name] + in + ( (fst(Input.source_content msg), (name, pos, id)) + , report pos (markup_tvar NONE pos) (fst(Input.source_content name), id) reports) + end) + data + [] + +val () = Position.reports_text markup +in +val tab = Symtab.make tab0 +end +\ + +ML \ +val _ = + fold (fn input => + let + val pos1' = Input.pos_of input + fun ctnt name0 = fst(Input.source_content name0) + val pos1 = [pos1'] + val msg1 = fst(Input.source_content input) + val msg2 = "No persons were found to have such nickname" + in + case Symtab.lookup tab (fst(Input.source_content input)) of + NONE => tap (fn _ => Output.information (msg2 ^ Position.here_list pos1)) + (cons ((pos1', Markup.bad ()), "")) + | SOME (name0, pos0, id) => report pos1 (markup_tvar (SOME (ctnt name0)) pos0) (msg1, id) + end) + [ \the Knight\ \ \Example of a correct retrieval (CTRL + Hovering shows what we are expecting)\ + , \the Handsome\ \ \Example of a correct retrieval (CTRL + Hovering shows what we are expecting)\ + , \the Spy\ \ \Example of a failure to retrieve the person in \<^ML>\tab\\ + ] + [] + |> Position.reports_text +\ + +text\The pudding comes with the eating: \ + +subsection\Environment Structured Reporting\ + +text\ The structure @{ML_structure \Name_Space\} offers an own infra-structure for names and +manages the markup accordingly. MORE TO COME\ text\ @{ML_type "'a Name_Space.table"} \ -section\ Parsing issues \ -text\ Parsing combinators represent the ground building blocks of both generic input engines -as well as the specific Isar framework. They are implemented in the structure \verb+Token+ -providing core type \verb+Token.T+. +section\ The System Lexer and Token Issues\ +text\Four syntactic contexts are predefined in Isabelle (others can be added): +the ML context, the text context, the Isar-command context and the teerm-context, referring +to different needs of the Isabelle Framework as an extensible framework supporting incremental, +partially programmable extensions and as a Framework geared towards Formal Proofs and therefore +mathematical notations. The basic data-structure for the lexical treatment of these + \ -ML\ open Token\ + +subsection\Tokens\ + +text\The basic entity that lexers treat are \<^emph>\tokens\. 
defined in @{ML_structure "Token"}} +It provides a classification infrastructure, the references to positions and Markup +as well as way's to annotate tokens with (some) values they denote: +\ + +ML\ +local + open Token + + type dummy = Token.T + type src = Token.T list + type file = {src_path: Path.T, lines: string list, digest: SHA1.digest, pos: Position.T} + + type name_value = {name: string, kind: string, print: Proof.context -> Markup.T * xstring} + + + val _ = Token.is_command : Token.T -> bool; + val _ = Token.content_of : Token.T -> string; (* textueller kern eines Tokens. *) + + + val _ = pos_of: T -> Position.T + +(* +datatype kind = + (*immediate source*) + Command | Keyword | Ident | Long_Ident | Sym_Ident | Var | Type_Ident | Type_Var | Nat | + Float | Space | + (*delimited content*) + String | Alt_String | Verbatim | Cartouche | Comment of Comment.kind option | + (*special content*) + Error of string | EOF + + datatype value = + Source of src | + Literal of bool * Markup.T | + Name of name_value * morphism | + Typ of typ | + Term of term | + Fact of string option * thm list | + Attribute of morphism -> attribute | + Declaration of declaration | + Files of file Exn.result list + + +*) +in val _ = () +end +\ + + + +subsection\A Lexer Configuration Example\ + +ML\ + +\ + + +section\ Combinator Parsing \ +text\Parsing Combinators go back to monadic programming as advocated by Wadler et. al, and has been +worked out @{cite "DBLP:journals/jfp/Hutton92"}\ + + +ML\ \ ML\ @@ -1172,8 +1346,6 @@ val _ = parser2contextparser : 'a parser -> 'a context_parser; (* bah, is the same as Scan.lift *) val _ = Scan.lift Args.cartouche_input : Input.source context_parser; -Token.is_command; -Token.content_of; (* textueller kern eines Tokens. *) \ diff --git a/examples/technical_report/TR_my_commented_isabelle/ROOT b/examples/technical_report/TR_my_commented_isabelle/ROOT index c57b3d6..8a75e64 100644 --- a/examples/technical_report/TR_my_commented_isabelle/ROOT +++ b/examples/technical_report/TR_my_commented_isabelle/ROOT @@ -1,8 +1,9 @@ -session "TR_mycommentedisabelle" = "Isabelle_DOF" + +session "TR_MyCommentedIsabelle" = "Isabelle_DOF" + options [document = pdf, document_output = "output",quick_and_dirty = true] theories - "MyCommentedIsabelle" + "TR_MyCommentedIsabelle" document_files + "root.bib" "isadof.cfg" "preamble.tex" "prooftree.sty" diff --git a/examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy b/examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy new file mode 100644 index 0000000..5a48245 --- /dev/null +++ b/examples/technical_report/TR_my_commented_isabelle/TR_MyCommentedIsabelle.thy @@ -0,0 +1,2013 @@ +(*<*) +theory TR_MyCommentedIsabelle + imports "Isabelle_DOF.technical_report" + (*imports "../../../ontologies/technical_report"*) +begin + + +open_monitor*[this::report] +(*>*) + +title*[tit::title]\My Personal, Ecclectic Isabelle Programming Manual\ +subtitle*[stit::subtitle]\Version : Isabelle 2019\ +text*[bu::author, + email = "''wolff@lri.fr''", + affiliation = "\Université Paris-Saclay, LRI, France\"]\Burkhart Wolff\ + + +text*[abs::abstract, + keywordlist="[''LCF-Architecture'',''Isabelle'',''SML'',''PIDE'',''Programming Guide'', + ''Tactic Programming'']"]\ +While Isabelle is mostly known as part of Isabelle/HOL (an interactive +theorem prover), it actually provides a system framework for developing a wide +spectrum of applications. 
A particular strength of the Isabelle framework is the +combination of text editing, formal verification, and code generation. + +This is a programming-tutorial of Isabelle and Isabelle/HOL, a complementary +text to the unfortunately somewhat outdated "The Isabelle Cookbook" in +\url{https://nms.kcl.ac.uk/christian.urban/Cookbook/}. The reader is encouraged +not only to consider the generated .pdf, but also consult the loadable version in Isabelle/jedit +in order to make experiments on the running code. + +This text is written itself in Isabelle/Isar using a specific document ontology +for technical reports. It is intended to be a "living document", i.e. it is not only +used for generating a static, conventional .pdf, but also for direct interactive +exploration in Isabelle/jedit. This way, types, intermediate results of computations and +checks can be repeated by the reader who is invited to interact with this document. +Moreover, the textual parts have been enriched with a maximum of formal content +which makes this text re-checkable at each load and easier maintainable. +\ + +chapter*[intro::introduction]\ Introduction \ + +text\ +While Isabelle is mostly known as part of Isabelle/HOL (an interactive theorem prover), it actually +provides a system framework for developing a wide spectrum of applications. A particular strength of +the Isabelle framework is the combination of text editing, formal verification, and code generation. +This is a programming-tutorial of Isabelle and Isabelle/HOL, a complementary text to the +unfortunately somewhat outdated "The Isabelle Cookbook" in +\url{https://nms.kcl.ac.uk/christian.urban/Cookbook/}. The reader is encouraged not only to consider +the generated .pdf, but also consult the loadable version in Isabelle/jedit in order to make +experiments on the running code. This text is written itself in Isabelle/Isar using a specific +document ontology for technical reports. It is intended to be a "living document", i.e. it is not +only used for generating a static, conventional .pdf, but also for direct interactive exploration +in Isabelle/jedit. This way, types, intermediate results of computations and checks can be +repeated by the reader who is invited to interact with this document. Moreover, the textual +parts have been enriched with a maximum of formal content which makes this text re-checkable at +each load and easier maintainable.\ + +figure*[architecture::figure,relative_width="80",src="''figures/isabelle-architecture''"]\ + The system architecture of Isabelle (left-hand side) and the + asynchronous communication between the Isabelle system and + the IDE (right-hand side). \ + +text\This cookbook roughly follows the Isabelle system architecture shown in +@{figure "architecture"}, and, to be more precise, more or less in the bottom-up order. + +We start from the basic underlying SML platform over the Kernels to the tactical layer +and end with a discussion over the front-end technologies. +\ + +chapter*[t1::technical]\ SML and Fundamental SML libraries \ + +section*[t11::technical] "ML, Text and Antiquotations" + +text\Isabelle is written in SML, the "Standard Meta-Language", which is is an impure functional +programming language allowing, in principle, mutable variables and side-effects. +The following Isabelle/Isar commands allow for accessing the underlying SML interpreter +of Isabelle directly. 
In the example, a mutable variable X is declared, initialized to 0 and updated;
+and finally re-evaluated, leading to output: \
+
+ML\ val X = Unsynchronized.ref 0;
+    X := !X + 1;
+    X
+  \
+
+text\However, since Isabelle is a platform involving parallel execution and concurrent computing,
+and since, as an interactive environment, it involves backtracking / re-evaluation as a
+consequence of user interaction, imperative programming is discouraged and nearly never used
+in the entire Isabelle code-base. The preferred programming style is purely functional: \
+
+ML\ fun fac x = if x = 0 then 1 else x * fac(x-1) ;
+    fac 10;
+  \
+(* or *)
+ML\ type state = {  a : int, b : string}
+    fun incr_state ({a, b}:state) =  {a=a+1, b=b}
+  \
+
+text\ The factorial function is defined and executed; the (sub-)interpreter in Isar
+works in the conventional read-eval-print loop for each statement separated by a ";".
+Functions, types, data-types etc. can be grouped into modules (called \<^emph>\structures\), which can
+be constrained to interfaces (called \<^emph>\signatures\) and can even be parametric structures
+(called \<^emph>\functors\). \
+
+text\ The Isabelle/Isar interpreter (called \<^emph>\toplevel\) is extensible; by a mixture of SML
+and Isar-commands, domain-specific components can be developed and integrated into the system
+on the fly. Actually, the Isabelle system code-base consists mainly of SML and \<^verbatim>\.thy\-files
+containing such mixtures of Isar commands and SML. \
+
+text\ Besides the ML-command used in the above examples, there are a number of commands
+representing text-elements in Isabelle/Isar; text commands can be interleaved arbitrarily
+with other commands. Text in text-commands may use LaTeX and is used for typesetting
+documentation in a kind of literate programming style. \
+
+text\So: the text command for:\
+
+text\\<^emph>\This is a text.\\
+
+text\... is represented in an \<^verbatim>\.thy\ file by:\
+
+text\\verb+text\\<^emph>\This is a text.\\+\
+
+text\and displayed in the Isabelle/jedit front-end on the screen by:\
+
+figure*[fig2::figure, relative_width="100",src="''figures/text-element''"]
+  \A text-element as presented in Isabelle/jedit. \
+
+text\ text-commands, ML-commands (and in principle any other command) can be seen as
+\<^emph>\quotations\ over the underlying SML environment (similar to OCaml or Haskell).
+Linking these different sorts of quotations with each other and with the underlying SML environment
+is supported via \<^emph>\antiquotations\. Generally speaking, antiquotations are a programming
+technique to specify expressions or patterns in a quotation, parsed in the context of the
+enclosing expression or pattern where the quotation is. Another way to understand this concept:
+antiquotations are "semantic macros" that produce a value for the surrounding context
+(ML, text, HOL, whatever) depending on local arguments and the underlying logical context.
+
+The way an antiquotation is specified depends on the quotation expander: typically a specific
+character and an identifier, or specific parentheses and a complete expression or pattern.\
+
+text\ In Isabelle documentation, antiquotations are heavily used to enrich literate explanations
+with "formal content", i.e. machine-checked, typed references
+to all sorts of entities in the context of the interpreting environment.
+Formal content allows for coping with sources that rapidly evolve and are developed by
+distributed teams, as is typical in open-source developments.
+A paradigmatic example for antiquotation in Texts and Program snippets is here: +\ + +text\ @{footnote \sdf\}, @{file "$ISABELLE_HOME/src/Pure/ROOT.ML"}\ +ML\ val x = @{thm refl}; + val y = @{term "A \ B"} + val y = @{typ "'a list"} + \ + +text\... which we will describe in more detail later. \ + +text\In a way, literate specification attempting to maximize its formal content is a way to +ensure "Agile Development" in a (theory)-document development, at least for its objectives, +albeit not for its popular methods and processes like SCRUM. \ + +paragraph\ + A maximum of formal content inside text documentation also ensures the + consistency of this present text with the underlying Isabelle version.\ + +section\The Isabelle/Pure bootstrap\ + +text\It is instructive to study the fundamental bootstrapping sequence of the Isabelle system; + it is written in the Isar format and gives an idea of the global module dependencies: + @{file "$ISABELLE_HOME/src/Pure/ROOT.ML"}. Loading this file + (for example by hovering over this hyperlink in the antiquotation holding control or + command key in Isabelle/jedit and activating it) allows the Isabelle IDE + to support hyperlinking \<^emph>\inside\ the Isabelle source.\ + +text\The bootstrapping sequence is also reflected in the following diagram +@{figure "architecture"}. \ + + +section*[t12::technical] "Elements of the SML library"; +text\Elements of the @{file "$ISABELLE_HOME/src/Pure/General/basics.ML"} SML library +are basic exceptions. Note that exceptions should be catched individually, uncatched exceptions +except those generated by the specific "error" function are discouraged in Isabelle +source programming since they might produce races. Finally, a number of commonly +used "squigglish" combinators is listed: +\ +ML\ + Bind : exn; + Chr : exn; + Div : exn; + Domain : exn; + Fail : string -> exn; + Match : exn; + Overflow : exn; + Size : exn; + Span : exn; + Subscript : exn; + + exnName : exn -> string ; (* -- very interesting to query an unknown exception *) + exnMessage : exn -> string ; +\ + +ML\ +op ! : 'a Unsynchronized.ref -> 'a; +op := : ('a Unsynchronized.ref * 'a) -> unit; + +op #> : ('a -> 'b) * ('b -> 'c) -> 'a -> 'c; (* reversed function composition *) +op o : (('b -> 'c) * ('a -> 'b)) -> 'a -> 'c; +op |-- : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> 'd * 'e; +op --| : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> 'b * 'e; +op -- : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> ('b * 'd) * 'e; +op ? : bool * ('a -> 'a) -> 'a -> 'a; +ignore: 'a -> unit; +op before : ('a * unit) -> 'a; +I: 'a -> 'a; +K: 'a -> 'b -> 'a +\ + +text\Some basic examples for the programming style using these combinators can be found in the + "The Isabelle Cookbook" section 2.3.\ + +text\An omnipresent data-structure in the Isabelle SML sources are tables +implemented in @{file "$ISABELLE_HOME/src/Pure/General/table.ML"}. These +generic tables are presented in an efficient purely functional implementation using +balanced 2-3 trees. Key operations are:\ +ML\ +signature TABLE_reduced = +sig + type key + type 'a table + exception DUP of key + exception SAME + exception UNDEF of key + val empty: 'a table + val dest: 'a table -> (key * 'a) list + val keys: 'a table -> key list + val lookup_key: 'a table -> key -> (key * 'a) option + val lookup: 'a table -> key -> 'a option + val defined: 'a table -> key -> bool + val update: key * 'a -> 'a table -> 'a table + (* ... *) +end +\ +text\... 
where \<^verbatim>\key\ is usually just a synonym for string.\
+
+chapter*[t2::technical] \ Prover Architecture \
+
+section*[t21::technical] \ The Nano-Kernel: Contexts, (Theory)-Contexts, (Proof)-Contexts \
+
+text\ What I call the 'Nano-Kernel' in Isabelle can also be seen as an acyclic theory graph.
+The meat of it can be found in the file @{file "$ISABELLE_HOME/src/Pure/context.ML"}.
+My notion is a bit criticizable since this component, which provides the type of @{ML_type theory}
+and @{ML_type Proof.context}, is not that nano after all.
+However, these types are pretty empty place-holders at that level, and the content of
+@{file "$ISABELLE_HOME/src/Pure/theory.ML"} is registered much later.
+The sources themselves mention it as "Fundamental Structure".
+In principle, theories and proof contexts could be REGISTERED as user data inside contexts.
+The chosen specialization is therefore an acceptable meddling of the abstraction "Nano-Kernel"
+and its application context: Isabelle.
+
+Makarius himself says about this structure:
+
+"Generic theory contexts with unique identity, arbitrarily typed data,
+monotonic development graph and history support. Generic proof
+contexts with arbitrarily typed data."
+
+In my words: a context is essentially a container with
+\<^item> an id
+\<^item> a list of parents (so: the graph structure)
+\<^item> a time stamp, and
+\<^item> a sub-context relation (which uses a combination of the id and the time-stamp to
+  establish this relation very fast whenever needed; it plays a crucial role for the
+  context transfer in the kernel).
+
+
+A context comes in the form of three 'flavours':
+\<^item> theories : @{ML_type theory}'s, containing a syntax and axioms, but also
+  user-defined data and configuration information.
+\<^item> Proof-Contexts: @{ML_type Proof.context},
+  containing theories but also additional information if Isar goes into proof mode.
+  In general a richer structure than theories, coping also with fixes, facts, goals,
+  in order to support the structured Isar proof-style.
+\<^item> Generic: @{ML_type Context.generic }, i.e. the sum of both.
+
+All contexts have to be seen as mutable; so there are usually transformations defined on them
+which are possible as long as a particular protocol (\<^verbatim>\begin_thy\ - \<^verbatim>\end_thy\ etc.) is respected.
+
+Contexts carry user-defined, arbitrarily typed data which is mutable through the entire lifetime of
+a context.
+\
+
+subsection*[t212::technical]\ Mechanism 1 : Core Interface.
\ +text\To be found in @{file "$ISABELLE_HOME/src/Pure/context.ML"}:\ + +ML\ +Context.parents_of: theory -> theory list; +Context.ancestors_of: theory -> theory list; +Context.proper_subthy : theory * theory -> bool; +Context.Proof: Proof.context -> Context.generic; (*constructor*) +Context.proof_of : Context.generic -> Proof.context; +Context.certificate_theory_id : Context.certificate -> Context.theory_id; +Context.theory_name : theory -> string; +Context.map_theory: (theory -> theory) -> Context.generic -> Context.generic; +\ + + +text\ML structure @{ML_structure Proof_Context}:\ +ML\ + Proof_Context.init_global: theory -> Proof.context; + Context.Proof: Proof.context -> Context.generic; (* the path to a generic Context *) + Proof_Context.get_global: theory -> string -> Proof.context +\ + + +subsection*[t213::example]\Mechanism 2 : global arbitrary data structure that is attached to + the global and local Isabelle context $\theta$ \ +ML \ + +datatype X = mt +val init = mt; +val ext = I +fun merge (X,Y) = mt + +structure Data = Generic_Data +( + type T = X + val empty = init + val extend = ext + val merge = merge +); + + + Data.get : Context.generic -> Data.T; + Data.put : Data.T -> Context.generic -> Context.generic; + Data.map : (Data.T -> Data.T) -> Context.generic -> Context.generic; + (* there are variants to do this on theories ... *) +\ + + +section\ The LCF-Kernel: terms, types, theories, proof\_contexts, thms \ +text\The classical LCF-style \<^emph>\kernel\ is about +\<^enum> Types and terms of a typed $\lambda$-Calculus including constant symbols, + free variables, $\lambda$-binder and application, +\<^enum> An infrastructure to define types and terms, a \<^emph>\signature\, + that also assigns to constant symbols types +\<^enum> An abstract type of \<^emph>\theorem\ and logical operations on them +\<^enum> (Isabelle specific): a notion of \<^emph>\theory\, i.e. a container + providing a signature and set (list) of theorems. +\ + + +subsection\ Terms and Types \ +text \A basic data-structure of the kernel is @{file "$ISABELLE_HOME/src/Pure/term.ML"} \ +ML\ open Term; +signature TERM' = sig + (* ... *) + type indexname = string * int + type class = string + type sort = class list + type arity = string * sort list * sort + datatype typ = + Type of string * typ list | + TFree of string * sort | + TVar of indexname * sort + datatype term = + Const of string * typ | + Free of string * typ | + Var of indexname * typ | + Bound of int | + Abs of string * typ * term | + $ of term * term + exception TYPE of string * typ list * term list + exception TERM of string * term list + (* ... *) +end +\ + +text\This core-data structure of the Isabelle Kernel is accessible in the Isabelle/ML environment +and serves as basis for programmed extensions concerning syntax, type-checking, and advanced +tactic programming over kernel primitives and higher API's. There are a number of anti-quotations +giving support for this task; since @{ML Const}-names are long-names revealing information +of the potentially evolving library structure, the use of anti-quotations leads to a safer programming +style of tactics and became therefore standard in the entire Isabelle code-base. 
+\ + +text\The following examples show how term- and type-level antiquotations are used and that + they can both be used for term-construction as well as term-destruction (pattern-matching):\ + +ML\ val Const ("HOL.implies", @{typ "bool \ bool \ bool"}) + $ Free ("A", @{typ "bool"}) + $ Free ("B", @{typ "bool"}) + = @{term "A \ B"}; + + val "HOL.bool" = @{type_name "bool"}; + + (* three ways to write type bool:@ *) + val Type("fun",[s,Type("fun",[@{typ "bool"},Type(@{type_name "bool"},[])])]) = @{typ "bool \ bool \ bool"}; + +\ + +text\Note that the SML interpreter is configured that he will actually print a type + \<^verbatim>\Type("HOL.bool",[])\ just as \<^verbatim>\"bool": typ\, so a compact notation looking + pretty much like a string. This can be confusing at times.\ + +text\Note, furthermore, that there is a programming API for the HOL-instance of Isabelle: +it is contained in @{file "$ISABELLE_HOME/src/HOL/Tools/hologic.ML"}. It offers for many +operators of the HOL logic specific constructors and destructors: +\ + +ML\ +HOLogic.boolT : typ; +HOLogic.mk_Trueprop: term -> term; +HOLogic.dest_Trueprop: term -> term; +HOLogic.Trueprop_conv: conv -> conv; +HOLogic.mk_setT: typ -> typ; +HOLogic.dest_setT: typ -> typ; +HOLogic.Collect_const: typ -> term; +HOLogic.mk_Collect: string * typ * term -> term; +HOLogic.mk_mem: term * term -> term; +HOLogic.dest_mem: term -> term * term; +HOLogic.mk_set: typ -> term list -> term; +HOLogic.conj_intr: Proof.context -> thm -> thm -> thm; +HOLogic.conj_elim: Proof.context -> thm -> thm * thm; +HOLogic.conj_elims: Proof.context -> thm -> thm list; +HOLogic.conj: term; +HOLogic.disj: term; +HOLogic.imp: term; +HOLogic.Not: term; +HOLogic.mk_not: term -> term; +HOLogic.mk_conj: term * term -> term; +HOLogic.dest_conj: term -> term list; +HOLogic.conjuncts: term -> term list; +(* ... *) +\ + + + +subsection\ Type-Certification (=checking that a type annotation is consistent) \ + +ML\ Type.typ_instance: Type.tsig -> typ * typ -> bool (* raises TYPE_MATCH *) \ +text\ there is a joker type that can be added as place-holder during term construction. + Jokers can be eliminated by the type inference. \ + +ML\ Term.dummyT : typ \ + +ML\ +Sign.typ_instance: theory -> typ * typ -> bool; +Sign.typ_match: theory -> typ * typ -> Type.tyenv -> Type.tyenv; +Sign.typ_unify: theory -> typ * typ -> Type.tyenv * int -> Type.tyenv * int; +Sign.const_type: theory -> string -> typ option; +Sign.certify_term: theory -> term -> term * typ * int; (* core routine for CERTIFICATION of types*) +Sign.cert_term: theory -> term -> term; (* short-cut for the latter *) +Sign.tsig_of: theory -> Type.tsig (* projects the type signature *) +\ +text\ +@{ML "Sign.typ_match"} etc. is actually an abstract wrapper on the structure +@{ML_structure "Type"} +which contains the heart of the type inference. +It also contains the type substitution type @{ML_type "Type.tyenv"} which is +is actually a type synonym for @{ML_type "(sort * typ) Vartab.table"} +which in itself is a synonym for @{ML_type "'a Symtab.table"}, so +possesses the usual @{ML "Symtab.empty"} and @{ML "Symtab.dest"} operations. 
+text\Note that @{emph \polymorphic variables\} are treated like constant symbols
+in the type inference; thus, the following test, that one type is an instance of the
+other, yields false:
+\
+ML\
+val ty = @{typ "'a option"};
+val ty' = @{typ "int option"};
+
+val Type("List.list",[S]) = @{typ "('a) list"}; (* decomposition example *)
+
+val false = Sign.typ_instance @{theory}(ty', ty);
+\
+text\In order to make the type inference work, one has to consider @{emph \schematic\}
+type variables, which are more and more hidden from the Isar interface. Consequently,
+the typ antiquotation above will not work for schematic type variables and we have
+to construct them by hand on the SML level: \
+ML\
+val t_schematic = Type("List.list",[TVar(("'a",0),@{sort "HOL.type"})]);
+\
+text\ MIND THE "'" !!!\
+text \On this basis, the following @{ML_type "Type.tyenv"} is constructed: \
+ML\
+val tyenv = Sign.typ_match ( @{theory})
+               (t_schematic, @{typ "int list"})
+               (Vartab.empty);
+val [(("'a", 0), (["HOL.type"], @{typ "int"}))] = Vartab.dest tyenv;
+\
+
+text\ Type generalization --- the conversion between free type variables and schematic
+type variables --- is apparently no longer part of the standard API (there is a slightly
+more general replacement in @{ML "Term_Subst.generalizeT_same"}, however). Here is a way to
+overcome this by a self-baked generalization function:\
+
+ML\
+val generalize_typ = Term.map_type_tfree (fn (str,sort)=> Term.TVar((str,0),sort));
+val generalize_term = Term.map_types generalize_typ;
+val true = Sign.typ_instance @{theory} (ty', generalize_typ ty)
+\
+text\... or more general variants thereof that are parameterized by the indexes for schematic
+type variables instead of assuming just @{ML "0"}. \
+
+text\ Example:\
+ML\val t = generalize_term @{term "[]"}\
+
+text
+\Now we turn to the crucial issue of type instantiation with a given type environment
+@{ML "tyenv"}. For this purpose, one has to switch to the low-level interface
+@{ML_structure "Term_Subst"}.
+\
+
+ML\
+Term_Subst.map_types_same : (typ -> typ) -> term -> term;
+Term_Subst.map_aterms_same : (term -> term) -> term -> term;
+Term_Subst.instantiate: ((indexname * sort) * typ) list * ((indexname * typ) * term) list -> term -> term;
+Term_Subst.instantiateT: ((indexname * sort) * typ) list -> typ -> typ;
+Term_Subst.generalizeT: string list -> int -> typ -> typ;
+               (* this is the standard type generalisation function !!!
+                  only type-frees in the string-list are taken into account. *)
+Term_Subst.generalize: string list * string list -> int -> term -> term
+               (* this is the standard term generalisation function !!!
+                  only type-frees and frees in the string-lists are taken
+                  into account. *)
+\
+
+text \Apparently, a bizarre conversion between the old-style interface and
+the new-style @{ML "tyenv"} is necessary. See the following example.\
+ML\
+val S = Vartab.dest tyenv;
+val S' = (map (fn (s,(t,u)) => ((s,t),u)) S) : ((indexname * sort) * typ) list;
+         (* it took me quite some time to find out that these two type representations,
+            obscured by a number of type-synonyms, were actually identical. *)
+val ty = t_schematic;
+val ty' = Term_Subst.instantiateT S' t_schematic;
+val t = (generalize_term @{term "[]"});
+
+val t' = Term_Subst.map_types_same (Term_Subst.instantiateT S') (t)
+(* or alternatively : *)
+val t'' = Term.map_types (Term_Subst.instantiateT S') (t)
+\
+
+subsection\ Type-Inference (= inferring consistent type information if possible) \
+
+text\ Type inference also eliminates joker types such as @{ML dummyT} and produces
+      instances for schematic type variables where necessary. In the case of success,
+      it produces a certifiable term. \
+ML\
+Type_Infer_Context.infer_types: Proof.context -> term list -> term list
+\
+
+subsection\ thy and the signature interface\
+ML\
+Sign.tsig_of: theory -> Type.tsig;
+Sign.syn_of : theory -> Syntax.syntax;
+Sign.of_sort : theory -> typ * sort -> bool ;
+\
+
+subsection\ Thm's and the LCF-Style, "Micro"-Kernel \
+text\
+  The basic constructors and operations on theorems are defined in
+  @{file "$ISABELLE_HOME/src/Pure/thm.ML"};
+  a set of derived (Pure) inferences can be found in @{file "$ISABELLE_HOME/src/Pure/drule.ML"}.
+
+  The main types provided by structure \<^verbatim>\Thm\ are certified types @{ML_type ctyp},
+  certified terms @{ML_type cterm}, @{ML_type thm} as well as conversions @{ML_type conv}.
+\
+
+ML\
+signature BASIC_THM =
+sig
+  type ctyp
+  type cterm
+  exception CTERM of string * cterm list
+  type thm
+  type conv = cterm -> thm
+  exception THM of string * int * thm list
+end;
+\
+
+text\Certification of types and terms on the kernel-level is done by the generators:\
+ML\
+  Thm.global_ctyp_of: theory -> typ -> ctyp;
+  Thm.ctyp_of: Proof.context -> typ -> ctyp;
+  Thm.global_cterm_of: theory -> term -> cterm;
+  Thm.cterm_of: Proof.context -> term -> cterm;
+\
+text\... which perform type-checking in the given theory context in order to make a type
+     or term "admissible" for the kernel.\
+
+text\ We now come to the very heart of the LCF-Kernel of Isabelle, which
+     provides the fundamental inference rules of Isabelle/Pure.
+
+     Besides a number of destructors on @{ML_type thm}'s,
+     the abstract data-type \<^verbatim>\thm\ is used for logical objects of the form
+     $\Gamma \vdash_\theta \phi$, where $\Gamma$ represents a set of local assumptions,
+     $\theta$ the "theory" or more precisely the global context in which a formula $\phi$
+     has been constructed just by applying the following operations representing
+     logical inference rules:
+
+\
+ML\
+  (*inference rules*)
+  Thm.assume: cterm -> thm;
+  Thm.implies_intr: cterm -> thm -> thm;
+  Thm.implies_elim: thm -> thm -> thm;
+  Thm.forall_intr: cterm -> thm -> thm;
+  Thm.forall_elim: cterm -> thm -> thm;
+
+  Thm.transfer : theory -> thm -> thm;
+
+  Thm.generalize: string list * string list -> int -> thm -> thm;
+  Thm.instantiate: ((indexname*sort)*ctyp)list * ((indexname*typ)*cterm) list -> thm -> thm;
+\
+text\ They reflect the Pure logic depicted in a number of presentations such as
+      M. Wenzel, \<^emph>\Parallel Proof Checking in Isabelle/Isar\, PLMMS 2009, or similar papers.
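+\
+
+text\As a tiny, self-contained sketch of these kernel primitives in action, the tautology
+\<^verbatim>\PROP A ==> PROP A\ can be derived from @{ML Thm.assume} and @{ML Thm.implies_intr}
+alone (the proposition is made up; any other one would do):\
+
+ML\
+val ctxt = @{context};
+(* certify the (made-up) proposition "PROP A" for the kernel ... *)
+val A = Thm.cterm_of ctxt (Free ("A", @{typ "prop"}));
+(* ... derive A |- A by assumption, then discharge the assumption: |- PROP A ==> PROP A *)
+val thm = Thm.implies_intr A (Thm.assume A);
+\
+
+text\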
+ Notated as logical inference rules, these operations were presented as follows: +\ + +side_by_side_figure*["text-elements"::side_by_side_figure,anchor="''fig-kernel1''", + caption="''Pure Kernel Inference Rules I ''",relative_width="48", + src="''figures/pure-inferences-I''",anchor2="''fig-kernel2''", + caption2="''Pure Kernel Inference Rules II''",relative_width2="47", + src2="''figures/pure-inferences-II''"]\ \ + +(* +figure*[kir1::figure,relative_width="100",src="''figures/pure-inferences-I''"] + \ Pure Kernel Inference Rules I.\ +figure*[kir2::figure,relative_width="100",src="''figures/pure-inferences-II''"] + \ Pure Kernel Inference Rules II. \ + *) + +text\Note that the transfer rule: +\[ +\begin{prooftree} + \Gamma \vdash_\theta \phi \qquad \qquad \theta \sqsubseteq \theta' +\justifies + \Gamma \vdash_{\theta'} \phi \qquad \qquad +\end{prooftree} +\] +which is a consequence of explicit theories characteristic for Isabelle's LCF-kernel design +and a remarkable difference to its sisters HOL-Light and HOL4; instead of transfer, these systems +reconstruct proofs in an enlarged global context instead of taking the result and converting it.\ + +text\Besides the meta-logical (Pure) implication $\_\Longrightarrow \_$, the Kernel axiomatizes +also a Pure-Equality $\_ \equiv \_ $ used for definitions of constant symbols: \ +ML\ + Thm.reflexive: cterm -> thm; + Thm.symmetric: thm -> thm; + Thm.transitive: thm -> thm -> thm; +\ +text\The operation:\ +ML\ Thm.trivial: cterm -> thm; \ +text\... produces the elementary tautologies of the form @{prop "A \ A"}, + an operation used to start a backward-style proof.\ + +text\The elementary conversions are:\ +ML\ + Thm.beta_conversion: bool -> conv; + Thm.eta_conversion: conv; + Thm.eta_long_conversion: conv; +\ + +text\On the level of @{ML_structure "Drule"}, a number of higher-level operations is established, + which is in part accessible by a number of forward-reasoning notations on Isar-level.\ +ML\ + op RSN: thm * (int * thm) -> thm; + op RS: thm * thm -> thm; + op RLN: thm list * (int * thm list) -> thm list; + op RL: thm list * thm list -> thm list; + op MRS: thm list * thm -> thm; + op OF: thm * thm list -> thm; + op COMP: thm * thm -> thm; +\ + + +subsection\ Theories \ + +text \ This structure yields the datatype \verb*thy* which becomes the content of +\verb*Context.theory*. In a way, the LCF-Kernel registers itself into the Nano-Kernel, +which inspired me (bu) to this naming. + +\ +ML\ + +(* intern Theory.Thy; + +datatype thy = Thy of + {pos: Position.T, + id: serial, + axioms: term Name_Space.table, + defs: Defs.T, + wrappers: wrapper list * wrapper list}; + +*) + +Theory.check: {long: bool} -> Proof.context -> string * Position.T -> theory; + +Theory.local_setup: (Proof.context -> Proof.context) -> unit; +Theory.setup: (theory -> theory) -> unit; (* The thing to extend the table of "command"s with parser - callbacks. 
*) +Theory.get_markup: theory -> Markup.T; +Theory.axiom_table: theory -> term Name_Space.table; +Theory.axiom_space: theory -> Name_Space.T; +Theory.axioms_of: theory -> (string * term) list; +Theory.all_axioms_of: theory -> (string * term) list; +Theory.defs_of: theory -> Defs.T; +Theory.at_begin: (theory -> theory option) -> theory -> theory; +Theory.at_end: (theory -> theory option) -> theory -> theory; +Theory.begin_theory: string * Position.T -> theory list -> theory; +Theory.end_theory: theory -> theory; + +\ + + +section\Backward Proofs: Tactics, Tacticals and Goal-States\ + +text\At this point, we leave the Pure-Kernel and start to describe the first layer on top +of it, involving support for specific styles of reasoning and automation of reasoning.\ + +text\ \<^verbatim>\tactic\'s are in principle \<^emph>\relations\ on theorems @{ML_type "thm"}; this gives a natural way +to represent the fact that HO-Unification (and therefore the mechanism of rule-instantiation) +are non-deterministic in principle. Heuristics may choose particular preferences between +the theorems in the range of this relation, but the Isabelle Design accepts this fundamental +fact reflected at this point in the prover architecture. +This potentially infinite relation is implemented by a function of theorems to lazy lists +over theorems, which gives both sufficient structure for heuristic +considerations as well as a nice algebra, called \<^verbatim>\TACTICAL\'s, providing a bottom element +@{ML "no_tac"} (the function that always fails), the top-element @{ML "all_tac"} + (the function that never fails), sequential composition @{ML "op THEN"}, (serialized) +non-deterministic composition @{ML "op ORELSE"}, conditionals, repetitions over lists, etc. +The following is an excerpt of @{file "~~/src/Pure/tactical.ML"}: +\ + +ML\ +signature TACTICAL = +sig + type tactic = thm -> thm Seq.seq + + val all_tac: tactic + val no_tac: tactic + val COND: (thm -> bool) -> tactic -> tactic -> tactic + + val THEN: tactic * tactic -> tactic + val ORELSE: tactic * tactic -> tactic + val THEN': ('a -> tactic) * ('a -> tactic) -> 'a -> tactic + val ORELSE': ('a -> tactic) * ('a -> tactic) -> 'a -> tactic + + val TRY: tactic -> tactic + val EVERY: tactic list -> tactic + val EVERY': ('a -> tactic) list -> 'a -> tactic + val FIRST: tactic list -> tactic + (* ... *) +end +\ + +text\The next layer in the architecture describes @{ML_type \tactic\}'s, i.e. basic operations on +theorems in a backward reasoning style (bottom up development of proof-trees). An initial goal-state +for some property @{term A} --- the \<^emph>\goal\ --- is constructed via the kernel +@{ML "Thm.trivial"}-operation into @{term "A \ A"}, and tactics either refine the premises +--- the \<^emph>\subgoals\ the of this meta-implication --- +producing more and more of them or eliminate them in subsequent goal-states. Subgoals of the form +@{term "B\<^sub>1 \ B\<^sub>2 \ A \ B\<^sub>3 \ B\<^sub>4 \ A"} can be eliminated via +the @{ML Tactic.assume_tac} - tactic, +and a subgoal @{term C\<^sub>m} can be refined via the theorem +@{term "E\<^sub>1 \ E\<^sub>2 \ E\<^sub>3 \ C\<^sub>m"} the @{ML Tactic.resolve_tac} - tactic to new subgoals +@{term "E\<^sub>1"},@{term "E\<^sub>2"}, @{term "E\<^sub>3"}. In case that a theorem used for resolution +has no premise @{term "E\<^sub>i"}, the subgoal @{term C\<^sub>m} is also eliminated ("closed"). 
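+\
+
+text\As a minimal, self-contained sketch of this backward style: the trivial goal
+\<^verbatim>\B ==> B\ is set up and its only subgoal closed by assumption, using @{ML "Goal.prove"}
+from the goal package described below (the goal is made up for illustration):\
+
+ML\
+val ctxt = @{context};
+(* set up the goal B ==> B and solve it with a single assumption step *)
+val thm = Goal.prove ctxt [] [] @{prop "B ==> B"}
+                     (fn {context, ...} => assume_tac context 1);
+\
+
+text\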
+The following excerpt lists the most commonly used @{ML_type \tactic\}'s, drawn from
+@{file "~~/src/Pure/tactic.ML"}:
+\
+
+ML\
+  (* ... *)
+  assume_tac: Proof.context -> int -> tactic;
+  compose_tac: Proof.context -> (bool * thm * int) -> int -> tactic;
+  resolve_tac: Proof.context -> thm list -> int -> tactic;
+  eresolve_tac: Proof.context -> thm list -> int -> tactic;
+  forward_tac: Proof.context -> thm list -> int -> tactic;
+  dresolve_tac: Proof.context -> thm list -> int -> tactic;
+  rotate_tac: int -> int -> tactic;
+  defer_tac: int -> tactic;
+  prefer_tac: int -> tactic;
+  (* ... *)
+\
+text\Note that "applying a rule" is a fairly complex operation in the Extended Isabelle Kernel,
+i.e. the tactic layer. It involves at least four phases, interfacing a theorem
+coming from the global context $\theta$ (= theory), be it axiom or derived, into a given goal-state.
+\<^item> \<^emph>\generalization\. All free variables in the theorem are replaced by schematic variables.
+  For example, @{term "x + y = y + x"} is converted into
+  @{emph \?x + ?y = ?y + ?x\ }.
+  Type variables are treated analogously.
+\<^item> \<^emph>\lifting over assumptions\. If a subgoal is of the form:
+  @{term "B\<^sub>1 \ B\<^sub>2 \ A"} and we have a theorem @{term "D\<^sub>1 \ D\<^sub>2 \ A"}, then before
+  applying the theorem, the premises are \<^emph>\lifted\, resulting in the logical refinement:
+  @{term "(B\<^sub>1 \ B\<^sub>2 \ D\<^sub>1) \ (B\<^sub>1 \ B\<^sub>2 \ D\<^sub>2) \ A"}. Now, @{ML "resolve_tac"}, for example,
+  will replace the subgoal @{term "B\<^sub>1 \ B\<^sub>2 \ A"} by the subgoals
+  @{term "B\<^sub>1 \ B\<^sub>2 \ D\<^sub>1"} and @{term "B\<^sub>1 \ B\<^sub>2 \ D\<^sub>2"}. Of course, if the theorem had no
+  assumptions @{term "D\<^sub>1"} and @{term "D\<^sub>2"}, the subgoal @{term "A"} would be replaced by
+  \<^bold>\nothing\, i.e. deleted.
+\<^item> \<^emph>\lifting over parameters\. If a subgoal is meta-quantified like in:
+  @{term "\ x y z. A x y z"}, then a theorem like @{term "D\<^sub>1 \ D\<^sub>2 \ A"} is \<^emph>\lifted\
+  to @{term "\ x y z. D\<^sub>1' \ D\<^sub>2' \ A'"}, too. Since free variables occurring in @{term "D\<^sub>1"},
+  @{term "D\<^sub>2"} and @{term "A"} have been replaced by schematic variables (see phase one),
+  they must be replaced by parameterized schematic variables, i.e. a kind of Skolem function.
+  For example, @{emph \?x + ?y = ?y + ?x\ } would be lifted to
+  @{emph \!! x y z. ?x x y z + ?y x y z = ?y x y z + ?x x y z\}. This way, the lifted theorem
+  can be instantiated by the parameters @{term "x y z"} representing "fresh free variables"
+  used for this sub-proof. This mechanism implements their logically correct bookkeeping via
+  kernel primitives.
+\<^item> \<^emph>\Higher-order unification (of schematic type and term variables)\.
+  Finally, for all these schematic variables, a solution must be found.
+  In the case of @{ML resolve_tac}, the conclusion of the (doubly lifted) theorem must
+  be equal to the conclusion of the subgoal, so @{term A} must be $\alpha/\beta$-equivalent to
+  @{term A'} in the example above, which is established by a higher-order unification
+  process. It is a bit unfortunate that, for implementation efficiency reasons, a very substantial
+  part of the code for HO-unification is in the kernel module @{ML_type "thm"}, which makes this
+  critical component of the architecture larger than necessary.
+\ + +text\In a way, the two lifting processes represent an implementation of the conversion between +Gentzen Natural Deduction (to which Isabelle/Pure is geared) reasoning and +Gentzen Sequent Deduction.\ + + +section\The classical goal package\ + +ML\ open Goal; +Goal.prove_internal : Proof.context -> cterm list -> cterm -> (thm list -> tactic) -> thm; + +Goal.prove_global : theory -> string list -> term list -> term -> + ({context: Proof.context, prems: thm list} -> tactic) -> thm +\ + +section\The Isar Engine\ + +text\The main structure of the Isar-engine is @{ ML_structure Toplevel} and provides and +internal @{ ML_type state} with the necessary infrastructure --- +i.e. the operations to pack and unpack theories and Proof.contexts --- on it: +\ +ML\ + Toplevel.theory_toplevel: theory -> Toplevel.state; + Toplevel.init_toplevel: unit -> Toplevel.state; + Toplevel.is_toplevel: Toplevel.state -> bool; + Toplevel.is_theory: Toplevel.state -> bool; + Toplevel.is_proof: Toplevel.state -> bool; + Toplevel.is_skipped_proof: Toplevel.state -> bool; + Toplevel.level: Toplevel.state -> int; + Toplevel.context_of: Toplevel.state -> Proof.context; + Toplevel.generic_theory_of: Toplevel.state -> generic_theory; + Toplevel.theory_of: Toplevel.state -> theory; + Toplevel.proof_of: Toplevel.state -> Proof.state; + Toplevel.presentation_context: Toplevel.state -> Proof.context; + +\ + +subsection \Transaction Management in the Isar-Engine : The Toplevel \ + +ML\ + +Toplevel.exit: Toplevel.transition -> Toplevel.transition; +Toplevel.keep: (Toplevel.state -> unit) -> Toplevel.transition -> Toplevel.transition; +Toplevel.keep': (bool -> Toplevel.state -> unit) -> Toplevel.transition -> Toplevel.transition; +Toplevel.ignored: Position.T -> Toplevel.transition; +Toplevel.generic_theory: (generic_theory -> generic_theory) -> Toplevel.transition -> Toplevel.transition; +Toplevel.theory': (bool -> theory -> theory) -> Toplevel.transition -> Toplevel.transition; +Toplevel.theory: (theory -> theory) -> Toplevel.transition -> Toplevel.transition; + +Toplevel.present_local_theory: +(xstring * Position.T) option -> + (Toplevel.state -> unit) -> Toplevel.transition -> Toplevel.transition; +(* where text treatment and antiquotation parsing happens *) + + +(*fun document_command markdown (loc, txt) = + Toplevel.keep (fn state => + (case loc of + NONE => ignore (output_text state markdown txt) + | SOME (_, pos) => + error ("Illegal target specification -- not a theory context" ^ Position.here pos))) o + Toplevel.present_local_theory loc (fn state => ignore (output_text state markdown txt)); *) + +(* Isar Toplevel Steuerung *) +Toplevel.keep : (Toplevel.state -> unit) -> Toplevel.transition -> Toplevel.transition; + (* val keep' = add_trans o Keep; + fun keep f = add_trans (Keep (fn _ => f)); + *) + +Toplevel.present_local_theory : (xstring * Position.T) option -> (Toplevel.state -> unit) -> + Toplevel.transition -> Toplevel.transition; + (* fun present_local_theory target = present_transaction (fn int => + (fn Theory (gthy, _) => + let val (finish, lthy) = Named_Target.switch target gthy; + in Theory (finish lthy, SOME lthy) end + | _ => raise UNDEF)); + + fun present_transaction f g = add_trans (Transaction (f, g)); + fun transaction f = present_transaction f (K ()); + *) + +Toplevel.theory : (theory -> theory) -> Toplevel.transition -> Toplevel.transition; + (* fun theory' f = transaction (fn int => + (fn Theory (Context.Theory thy, _) => + let val thy' = thy + |> Sign.new_group + |> f int + |> Sign.reset_group; + 
+                in Theory (Context.Theory thy', NONE) end
+          | _ => raise UNDEF));
+
+      fun theory f = theory' (K f); *)
+
+\
+
+
+subsection\ Configuration flags of fixed type in the Isar-engine. \
+text\The toplevel also provides an infrastructure for managing configuration options
+for system components. Based on a sum-type @{ML_type Config.value }
+with the alternatives \<^verbatim>\ Bool of bool | Int of int | Real of real | String of string\
+and building the parametric configuration types @{ML_type "'a Config.T" } and the
+instance \<^verbatim>\type raw = value T\, for all registered configurations the protocol:
+\
+ML\
+  Config.get: Proof.context -> 'a Config.T -> 'a;
+  Config.map: 'a Config.T -> ('a -> 'a) -> Proof.context -> Proof.context;
+  Config.put: 'a Config.T -> 'a -> Proof.context -> Proof.context;
+  Config.get_global: theory -> 'a Config.T -> 'a;
+  Config.map_global: 'a Config.T -> ('a -> 'a) -> theory -> theory;
+  Config.put_global: 'a Config.T -> 'a -> theory -> theory;
+\
+text\... etc. is defined.\
+
+text\Example registration of a config attribute XS232: \
+ML\
+val (XS232, XS232_setup)
+     = Attrib.config_bool \<^binding>\XS232\ (K false);
+
+val _ = Theory.setup XS232_setup;
+\
+
+(* or: just command:
+
+setup\XS232_setup\
+
+*)
+
+text\Another mechanism is global synchronized variables:\
+ML\ (* For example *)
+
+val C = Synchronized.var "Pretty.modes" "latEEex";
+(* Synchronized: a mechanism to bookkeep global
+   variables with a synchronization mechanism included *)
+Synchronized.value C;
+\
+
+chapter\Front-End \
+text\In the following chapter, we turn to the right part of the system architecture
+shown in @{docitem \architecture\}:
+the PIDE ("Prover-IDE") layer, consisting of a part written in SML and another in Scala.
+Roughly speaking, PIDE implements a "continuous build, continuous check" functionality
+over a textual, albeit generic document model. It transforms user modifications
+of text elements in an instance of this model into increments (edits) and communicates
+them to the Isabelle system. The latter reacts by creating a multitude of light-weight
+reevaluation threads, resulting in an asynchronous stream of markup that is used to annotate
+text elements. Such markup is used to highlight, e.g., variables
+or keywords with specific colors, to hyperlink bound variables to their defining occurrences,
+or to annotate terms with type information which is displayed on demand by specific
+user gestures (hovering), etc.
Note that PIDE is not an editor; it is the framework that
+coordinates these asynchronous information streams and optimizes them to a certain
+extent (outdated markup referring to modified text is dropped, and
+corresponding re-calculations are oriented to the user focus, for example).
+Four concrete editors --- also called PIDE applications --- have been implemented:
+\<^enum>an Eclipse plugin (developed by an Edinburgh group, based on a very old PIDE version),
+\<^enum>a Visual Studio Code plugin (developed by Makarius Wenzel),
+  currently based on a fairly old PIDE version,
+\<^enum>clide, a web-client supporting JavaScript and HTML5
+  (developed by a group at the University of Bremen, based on a very old PIDE version), and
+\<^enum>the most commonly used: the plugin for the jEdit editor
+  (developed by Makarius Wenzel, current PIDE version).
+\
+
+text\The document model foresees a number of text files, which are organized in the form of an
+acyclic graph. Such graphs can be
+grouped into \<^emph>\sessions\ and "frozen" to binaries in order to avoid long compilation
+times. Text files have an abstract name serving as identity (the mapping to file-paths
+in an underlying file-system is handled by a separate build management).
+The primary format of the text files is \<^verbatim>\.thy\ (historically for: theory);
+secondary formats can be \<^verbatim>\.sty\, \<^verbatim>\.tex\, \<^verbatim>\.png\, \<^verbatim>\.pdf\, or other files processed
+by Isabelle and listed in a configuration processed by the build system.
+\
+figure*[fig3::figure, relative_width="100",src="''figures/document-model''"]
+       \A Theory-Graph in the Document Model\
+text\A \<^verbatim>\.thy\ file consists of a \<^emph>\header\, a \<^emph>\context-definition\ and
+a \<^emph>\body\ consisting of a sequence of \<^emph>\commands\. Even the header consists of
+a sequence of commands used for introductory text elements not depending on any context
+information (so: practically excluding any form of text antiquotation (see above)).
+The context-definition contains an \<^emph>\import\ and a \<^emph>\keyword\ section;
+for example:
+\begin{verbatim}
+theory Isa_DOF                (* Isabelle Document Ontology Framework *)
+  imports  Main
+           RegExpInterface    (* Interface to functional regular automata for monitoring *)
+           Assert
+
+  keywords "+=" ":=" "accepts" "rejects"
+\end{verbatim}
+where \<^verbatim>\Isa_DOF\ is the abstract name of the text-file, \<^verbatim>\Main\ etc. refer to imported
+text files (recall that the import relation must be acyclic). \<^emph>\keyword\s are used to separate
+commands from each other;
+predefined commands allow for the dynamic creation of new commands, similarly
+to the definition of new functions in an interpreter shell (or: toplevel, see above).
+A command starts with a pre-declared keyword followed by the specific syntax of this command;
+the declaration of a keyword is only allowed in the same \<^verbatim>\.thy\-file where
+the corresponding new command is defined. The semantics of the command is expressed
+in ML and consists of a @{ML_type "Toplevel.transition -> Toplevel.transition"}
+function. Thus, the Isar-toplevel supports the generic document model
+and allows for user-programmed extensions.
+\
+
+text\In the traditional literature, Isabelle \<^verbatim>\.thy\-files
+were said to be processed by two types of parsers:
+\<^enum> the "outer-syntax" (i.e. the syntax for commands) is processed
+  by a lexer-library and parser combinators built on top, and
+\<^enum> the "inner-syntax" (i.e. the syntax for @{term \\\} - terms)
+  with an evolved, eight-layer parsing and pretty-printing process
+  based on an Earley algorithm.
+\
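+
+text\As a small illustration of the first bullet, the outer-syntax lexer and the parser
+combinators of @{ML_structure Parse} can be exercised directly from ML; the sample string
+"foo 42" is made up for this sketch:\
+
+ML\
+val toks = Token.explode (Thy_Header.get_keywords' @{context}) Position.none "foo 42"
+           |> filter Token.is_proper;                 (* drop white-space tokens *)
+val ((name, n), _) = (Parse.name -- Parse.nat) toks;  (* parse a name and a natural number *)
+\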
+text\In the traditional literature, Isabelle \<^verbatim>\.thy\-files
+were said to be processed by two types of parsers:
+\<^enum> the "outer-syntax" (i.e. the syntax for commands) is processed
+  by a lexer-library and parser combinators built on top, and
+\<^enum> the "inner-syntax" (i.e. the syntax for @{term \\\} - terms)
+  with an evolved, eight-layer parsing and pretty-printing process
+  based on an Earley-algorithm @{cite "DBLP:journals/cacm/Earley70"}.
+\
+
+text\This picture is less and less true for a number of reasons:
+\<^enum> With the advent of \(\)... (\)\, a mechanism for
+  \<^emph>\cascade-syntax\ came to the Isabelle platform that introduces a flexible means
+  to change parsing contexts \<^emph>\and\ parsing technologies.
+\<^enum> Inside the term-parser levels, the concept of \<^emph>\cartouche\ can be used
+  to escape the parser and its underlying parsing technology.
+\<^enum> Outside, in the traditional toplevel-parsers, the
+  \(\)... (\)\ is becoming more and more enforced
+  (some years ago, syntax like \term{* ... *}\ was replaced by
+   syntax \term(\)... (\)\). This makes technical support of cascade syntax
+   increasingly easy.
+\<^enum> The lexer infra-structure is already rather generic; nothing prevents
+  adding, besides the existing lexer configurations for ML-parsers, toplevel command
+  syntax parsers, and mathematical notation parsers for $\lambda$-terms, new pillars
+  of parsing technologies, say, for parsing C or Rust or JavaScript inside
+  Isabelle.
+\
+
+
+section\Basics: string, bstring and xstring\
+text\@{ML_type "string"} is the basic library type from the SML library
+in structure @{ML_structure "String"}. Many Isabelle operations produce
+or require formats thereof introduced as type synonyms
+@{ML_type "bstring"} (defined in structure @{ML_structure "Binding"})
+and @{ML_type "xstring"} (defined in structure @{ML_structure "Name_Space"}).
+Unfortunately, the abstraction is not tight and combinations with
+elementary routines might produce quite garbled results.
+\
+
+ML\val b = Binding.name_of@{binding "here"}\
+text\... produces the system output \verb+val it = "here": bstring+,
+     but note that it is misleading to believe it is just a string.
+\
+
+ML\String.explode b\ (* is harmless, but *)
+ML\String.explode(Binding.name_of
+(Binding.conglomerate[Binding.qualified_name "X.x",
+                      @{binding "here"}]  ))\
+(* Somehow it is possible to smuggle markup into bstrings; and this leads
+   ML\(((String.explode(!ODL_Command_Parser.SPY6))))\
+   to something like
+   val it = [#"\^E", #"\^F", #"i", #"n", #"p", #"u", #"t", #"\^F", #"d", #"e", #"l", #"i", #"m", #"i", #"t", #"e", #"d", #"=", #"f", #"a", ...]: char list
+*)
+
+text\ However, there is a dedicated XML parser for this format. See Section Markup.
+\
+ML\ fun dark_matter x = XML.content_of (YXML.parse_body x)\
+
+(* MORE TO COME *)
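+text\A short usage sketch: applied to the markup-polluted bstring from the example above,
+@{ML "dark_matter"} strips the smuggled markup and only the raw text content remains:\
+ML\
+val polluted = Binding.name_of (Binding.conglomerate [Binding.qualified_name "X.x",
+                                                      @{binding "here"}]);
+val clean = dark_matter polluted;   (* the YXML transitions are gone *)
+\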
+section\Positions\
+text\Basic data-structures relevant for PIDE are \<^emph>\positions\; beyond the usual line- and column
+information they can represent ranges, lists of ranges, and the name of the atomic sub-document
+in which they are contained. The command:\
+ML\
+val pos = @{here};
+val markup = Position.here pos;
+writeln ("And a link to the declaration of 'here' is "^markup)
+\
+(* \<^here>  *)
+text\ ... uses the antiquotation @{ML "@{here}"} to infer from the system lexer the actual position
+of itself in the global document, converts it to markup (a string-representation of it) and sends
+it via the usual @{ML "writeln"} to the interface.
+\
+
+figure*[hyplinkout::figure,relative_width="40",src="''figures/markup-demo''"]
+\Output with hyperlinked position.\
+
+text\@{docitem \hyplinkout\} shows the produced output where the little house-like symbol in the
+display is hyperlinked to the position of @{ML "@{here}"} in the ML sample above.\
+
+section\Markup and Low-level Markup Reporting\
+text\The structures @{ML_structure Markup} and @{ML_structure Properties} represent the basic
+annotation data which is part of the protocol sent from Isabelle to the frontend.
+They are qualified as "quasi-abstract", which means they are intended to be an abstraction of
+the serialized, textual presentation of the protocol. Markups are structurally a pair of a key
+and properties; @{ML_structure Markup} provides a number of such \<^emph>\key\s for annotation classes
+such as "constant", "fixed", "cartouche", some of them quite obscure. Here is a code sample
+from \<^theory_text>\Isabelle_DOF\. A markup must be tagged with an id; this is done by the @{ML serial}-function
+discussed earlier.\
+subsection\A simple Example\
+ML\
+local
+
+val docclassN = "doc_class";
+
+(* derived from: theory_markup; def for "defining occurrence" (true) in contrast to
+   "referring occurrence" (false). *)
+fun docclass_markup def name id pos =
+  if id = 0 then Markup.empty
+  else Markup.properties (Position.entity_properties_of def id pos)
+                         (Markup.entity docclassN name);
+
+in
+
+fun report_defining_occurrence pos cid =
+           let val id = serial ()
+               val markup_of_cid = docclass_markup true cid id pos
+           in  Position.report pos markup_of_cid end;
+
+end
+\
+
+text\The @{ML "report_defining_occurrence"}-function above takes a position and a "cid" parsed
+in the Front-End, converts this into markup together with a unique number identifying this
+markup, and sends this as a report to the Front-End.
+\
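+text\A short usage sketch (with a hypothetical class id \<^verbatim>\my_doc_class\): when
+invoked during parsing, the function emits its report as a side effect:\
+ML\
+val _ = report_defining_occurrence \<^here> "my_doc_class";
+(* the front-end receives a defining-occurrence markup for this very position *)
+\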
+subsection\A Slightly more Complex Example\
+
+ML \
+
+fun markup_tvar def_name ps (name, id) =
+  let
+    fun markup_elem name = (name, (name, []): Markup.T);
+    val (tvarN, tvar) = markup_elem ((case def_name of SOME name => name | _ => "") ^ "'s nickname is");
+    val entity = Markup.entity tvarN name
+    val def = def_name = NONE
+  in
+    tvar ::
+    (if def then I else cons (Markup.keyword_properties Markup.ML_keyword3))
+      (map (fn pos => Markup.properties (Position.entity_properties_of def id pos) entity) ps)
+  end
+
+fun report [] _ _ = I
+  | report ps markup x =
+      let val ms = markup x
+      in fold (fn p => fold (fn m => cons ((p, m), "")) ms) ps end
+\
+
+ML \
+local
+val data = \ \Derived from Yakoub's example ;-)\
+
+  [ (\Frédéric 1er\,   \King of Naples\)
+  , (\Frédéric II\,    \King of Sicily\)
+  , (\Frédéric III\,   \the Handsome\)
+  , (\Frédéric IV\,    \of the Empty Pockets\)
+  , (\Frédéric V\,     \King of Denmark–Norway\)
+  , (\Frédéric VI\,    \the Knight\)
+  , (\Frédéric VII\,   \Count of Toggenburg\)
+  , (\Frédéric VIII\,  \Count of Zollern\)
+  , (\Frédéric IX\,    \the Old\)
+  , (\Frédéric X\,     \the Younger\) ]
+
+val (tab0, markup) =
+  fold_map (fn (name, msg) => fn reports =>
+              let val id = serial ()
+                  val pos = [Input.pos_of name]
+              in
+                ( (fst(Input.source_content msg), (name, pos, id))
+                , report pos (markup_tvar NONE pos) (fst(Input.source_content name), id) reports)
+              end)
+           data
+           []
+
+val () = Position.reports_text markup
+in
+val tab = Symtab.make tab0
+end
+\
+
+ML \
+val _ =
+  fold (fn input =>
+          let
+            val pos1' = Input.pos_of input
+            fun ctnt name0 = fst(Input.source_content name0)
+            val pos1 = [pos1']
+            val msg1 = fst(Input.source_content input)
+            val msg2 = "No persons were found to have such nickname"
+          in
+            case Symtab.lookup tab (fst(Input.source_content input)) of
+              NONE => tap (fn _ => Output.information (msg2 ^ Position.here_list pos1))
+                          (cons ((pos1', Markup.bad ()), ""))
+            | SOME (name0, pos0, id) => report pos1 (markup_tvar (SOME (ctnt name0)) pos0) (msg1, id)
+          end)
+       [ \the Knight\   \ \Example of a correct retrieval (CTRL + Hovering shows what we are expecting)\
+       , \the Handsome\ \ \Example of a correct retrieval (CTRL + Hovering shows what we are expecting)\
+       , \the Spy\      \ \Example of a failure to retrieve the person in \<^ML>\tab\\
+       ]
+       []
+  |> Position.reports_text
+\
+
+text\The proof of the pudding is in the eating: hover over the cartouches above and
+check the produced output. \
+
+subsection\Environment Structured Reporting\
+
+text\ The structure @{ML_structure \Name_Space\} offers its own infra-structure for names and
+manages the markup accordingly. MORE TO COME\
+text\ The key type is @{ML_type "'a Name_Space.table"}. \
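+text\Pending the announced material, a minimal hedged sketch (the kind label
+\<^verbatim>\nickname\ is only illustrative): name spaces and tables over them are created
+with a "kind" string that re-appears in markup and error messages:\
+ML\
+val ns  = Name_Space.empty "nickname";                                 (* a fresh name space *)
+val tab = Name_Space.empty_table "nickname" : string Name_Space.table; (* a table over it *)
+\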
+section\ The System Lexer and Token Issues\
+text\Four syntactic contexts are predefined in Isabelle (others can be added):
+the ML context, the text context, the Isar-command context and the term-context, referring
+to different needs of the Isabelle Framework as an extensible framework supporting incremental,
+partially programmable extensions and as a Framework geared towards Formal Proofs and therefore
+mathematical notations. The basic data-structure for the lexical treatment of these
+contexts is the \<^emph>\token\, presented in the following subsection.
+\
+
+subsection\Tokens\
+
+text\The basic entities that lexers treat are \<^emph>\tokens\, defined in @{ML_structure "Token"}.
+This structure provides a classification infrastructure, references to positions and markup,
+as well as ways to annotate tokens with (some) values they denote:
+\
+
+ML\
+local
+  open Token
+
+  type dummy = Token.T
+  type src = Token.T list
+  type file = {src_path: Path.T, lines: string list, digest: SHA1.digest, pos: Position.T}
+
+  type name_value = {name: string, kind: string, print: Proof.context -> Markup.T * xstring}
+
+
+  val _ = Token.is_command : Token.T -> bool;
+  val _ = Token.content_of : Token.T -> string; (* the textual core of a token *)
+
+
+  val _ = pos_of: T -> Position.T
+
+(*
+datatype kind =
+    (*immediate source*)
+    Command | Keyword | Ident | Long_Ident | Sym_Ident | Var | Type_Ident | Type_Var | Nat |
+    Float | Space |
+    (*delimited content*)
+    String | Alt_String | Verbatim | Cartouche | Comment of Comment.kind option |
+    (*special content*)
+    Error of string | EOF
+
+  datatype value =
+    Source of src |
+    Literal of bool * Markup.T |
+    Name of name_value * morphism |
+    Typ of typ |
+    Term of term |
+    Fact of string option * thm list |
+    Attribute of morphism -> attribute |
+    Declaration of declaration |
+    Files of file Exn.result list
+
+
+*)
+in val _ = ()
+end
+\
+
+
+
+subsection\A Lexer Configuration Example\
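+text\Since this subsection is still a stub, here is a minimal, hedged sketch: the system
+lexer @{ML "Token.explode"} is parameterized by a keyword table, which we simply take from
+the current theory; space tokens are part of the result and have to be filtered out for
+parsing purposes:\
+ML\
+val toks = Token.explode (Thy_Header.get_keywords @{theory}) Position.none
+                         "lemma \"A \<longrightarrow> A\"";
+val _ = map Token.content_of (filter Token.is_proper toks);
+(* should yield the contents of a command token and a string token:
+   ["lemma", "A \<longrightarrow> A"] *)
+\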
+section\ Combinator Parsing \
+text\Parsing combinators go back to monadic programming as advocated by Wadler et al., and have
+been worked out in @{cite "DBLP:journals/jfp/Hutton92"}. Parsing combinators are one of the two
+major parsing technologies of the Isabelle front-end, in particular for the outer-syntax used
+for the parsing of toplevel-commands. The core of the combinator library is
+@{ML_structure \Scan\} providing the @{ML_type "'a parser"} which is a synonym for
+@{ML_type " Token.T list -> 'a * Token.T list"}. The library also provides a bunch of
+infix parsing combinators, notably:\
+
+ML\
+  (*error handling*)
+  val _ = op !!  : ('a * message option -> message) -> ('a -> 'b) -> 'a -> 'b
+  (*apply function*)
+  val _ = op >>  : ('a -> 'b * 'c) * ('b -> 'd) -> 'a -> 'd * 'c
+  (*alternative*)
+  val _ = op ||  : ('a -> 'b) * ('a -> 'b) -> 'a -> 'b
+  (*sequential pairing*)
+  val _ = op --  : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> ('b * 'd) * 'e
+  (*dependent pairing*)
+  val _ = op :-- : ('a -> 'b * 'c) * ('b -> 'c -> 'd * 'e) -> 'a -> ('b * 'd) * 'e
+  (*projections*)
+  val _ = op :|-- : ('a -> 'b * 'c) * ('b -> 'c -> 'd * 'e) -> 'a -> 'd * 'e
+  val _ = op |--  : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> 'd * 'e
+  val _ = op --|  : ('a -> 'b * 'c) * ('c -> 'd * 'e) -> 'a -> 'b * 'e
+  (*concatenation*)
+  val _ = op ^^  : ('a -> string * 'b) * ('b -> string * 'c) -> 'a -> string * 'c
+  val _ = op ::: : ('a -> 'b * 'c) * ('c -> 'b list * 'd) -> 'a -> 'b list * 'd
+  val _ = op @@@ : ('a -> 'b list * 'c) * ('c -> 'b list * 'd) -> 'a -> 'b list * 'd
+  (*one element literal*)
+  val _ = op $$  : string -> string list -> string * string list
+  val _ = op ~$$ : string -> string list -> string * string list
+  \
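+text\A minimal, hedged run of these combinators on a stream of symbols (rather than tokens)
+may illustrate their semantics; @{ML "Symbol.explode"} provides the required string list:\
+ML\
+val ((a, b), rest) = ($$ "a" -- $$ "b") (Symbol.explode "abc");
+(* the sequential pairing consumed "a" and "b"; rest = ["c"] *)
+
+val (ab, _) = (($$ "a" -- $$ "b") >> op ^) (Symbol.explode "abc");
+(* >> post-processes the pair by concatenation, so ab = "ab" *)
+\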
+text\Usually, combinators are used in one of the two following type instances:\
+
+ML\
+
+  type 'a parser = Token.T list -> 'a * Token.T list
+  type 'a context_parser = Context.generic * Token.T list -> 'a * (Context.generic * Token.T list)
+
+
+(* conversion between these two : *)
+
+fun parser2contextparser pars (ctxt, toks) = let val (a, toks') = pars toks
+                                             in  (a,(ctxt, toks')) end;
+val _ = parser2contextparser : 'a parser -> 'a context_parser;
+
+(* bah, is the same as Scan.lift *)
+val _ = Scan.lift Args.cartouche_input : Input.source context_parser;
+
+
+\
+
+subsection\ Bindings \
+
+
+ML\
+val H = @{binding here}; (* There are "bindings" consisting of a text-span and a position,
+                            where \positions\ are absolute references to a file *)
+
+Binding.make: bstring * Position.T -> binding;
+Binding.pos_of @{binding erzerzer};
+Position.here: Position.T -> string;
+(* Bindings *)
+val X = @{here};
+
+\
+
+subsection \Input streams. \
+ML\
+  Input.source_explode : Input.source -> Symbol_Pos.T list;
+  (* conclusion: Input.source_explode converts " f @{thm refl}"
+     into:
+     [(" ", {offset=14, id=-2769}), ("f", {offset=15, id=-2769}), (" ", {offset=16, id=-2769}),
+      ("@", {offset=17, id=-2769}), ("{", {offset=18, id=-2769}), ("t", {offset=19, id=-2769}),
+      ("h", {offset=20, id=-2769}), ("m", {offset=21, id=-2769}), (" ", {offset=22, id=-2769}),
+      ("r", {offset=23, id=-2769}), ("e", {offset=24, id=-2769}), ("f", {offset=25, id=-2769}),
+      ("l", {offset=26, id=-2769}), ("}", {offset=27, id=-2769})]
+  *)\
+
+subsection \Scanning and combinator parsing. \
+text\It is used on two levels:
+\<^enum> outer syntax, that is the syntax in which Isar-commands are written, and
+\<^enum> inner-syntax, that is the syntax in which lambda-terms, and in particular HOL-terms, are written.
+\
+text\ A constant problem for newbies is this distinction, which makes it necessary
+  to use the " ... " quotes when switching to inner-syntax, except when the expected
+  inner-syntax term consists of only one literal.
+\
+
+ML\
+Scan.peek : ('a -> 'b -> 'c * 'd) -> 'a * 'b -> 'c * ('a * 'd);
+Scan.optional: ('a -> 'b * 'a) -> 'b -> 'a -> 'b * 'a;
+Scan.option: ('a -> 'b * 'a) -> 'a -> 'b option * 'a;
+Scan.repeat: ('a -> 'b * 'a) -> 'a -> 'b list * 'a;
+Scan.lift : ('a -> 'b * 'c) -> 'd * 'a -> 'b * ('d * 'c);
+Scan.lift (Parse.position Args.cartouche_input);
+\
+
+text\ "parsers" are actually interpreters; an 'a parser is a function that parses
+  an input stream and computes (= evaluates) it into 'a.
+  Since the semantics of an Isabelle command is a transition $\Rightarrow$ transition
+  or theory $\Rightarrow$ theory function, i.e. a global system transition,
+  parsers of that type can be constructed and bound as call-back functions
+  in a table of the Toplevel-structure of Isar.
+
+  The type 'a parser is already defined in the structure Token.
+\
+
+text\ Syntax operations : Interface for parsing, type-checking, "reading"
+  (both) and pretty-printing.
+  Note that this is a late-binding interface, i.e. a collection of "hooks".
+  The real work is done ... see below.
+
+  It encapsulates the data structure "syntax" --- the table with const symbols,
+  print and ast translations, ... The latter is accessible, e.g. from a Proof
+  context via @{ML Proof_Context.syn_of}.
+\
+
+ML\
+Parse.nat: int parser;
+Parse.int: int parser;
+Parse.enum_positions: string -> 'a parser -> ('a list * Position.T list) parser;
+Parse.enum: string -> 'a parser -> 'a list parser;
+Parse.input: 'a parser -> Input.source parser;
+
+Parse.!!! : (Token.T list -> 'a) -> Token.T list -> 'a;
+Parse.position: 'a parser -> ('a * Position.T) parser;
+
+(* Examples *)
+Parse.position Args.cartouche_input;
+\
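+text\Applying such a parser is plain function application; following the lexer sketch from
+above, a hedged example run of @{ML "Parse.nat"} on an exploded token stream (space tokens
+filtered out first):\
+ML\
+val toks = Token.explode (Thy_Header.get_keywords @{theory}) Position.none "42 43"
+           |> filter Token.is_proper;
+val (n, rest) = Parse.nat toks;   (* n = 42; rest still holds the token for 43 *)
+\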
+text\ Inner Syntax Parsing combinators for elementary Isabelle Lexems\
+ML\
+Syntax.parse_sort : Proof.context -> string -> sort;
+Syntax.parse_typ : Proof.context -> string -> typ;
+Syntax.parse_term : Proof.context -> string -> term;
+Syntax.parse_prop : Proof.context -> string -> term;
+Syntax.check_term : Proof.context -> term -> term;
+Syntax.check_props: Proof.context -> term list -> term list;
+Syntax.uncheck_sort: Proof.context -> sort -> sort;
+Syntax.uncheck_typs: Proof.context -> typ list -> typ list;
+Syntax.uncheck_terms: Proof.context -> term list -> term list;\
+
+text\In contrast to mere parsing, the following operators also provide type-checking
+  and internal reporting to PIDE --- see below. I did not find a mechanism to address
+  the internal serial-numbers used for the PIDE protocol, however, rumours have it
+  that such a thing exists. The variants \<^verbatim>\_global\ work on theories instead of
+  @{ML_type "Proof.context"}s.\
+
+ML\
+Syntax.read_sort: Proof.context -> string -> sort;
+Syntax.read_typ : Proof.context -> string -> typ;
+Syntax.read_term: Proof.context -> string -> term;
+Syntax.read_typs: Proof.context -> string list -> typ list;
+Syntax.read_sort_global: theory -> string -> sort;
+Syntax.read_typ_global: theory -> string -> typ;
+Syntax.read_term_global: theory -> string -> term;
+Syntax.read_prop_global: theory -> string -> term;
+\
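+text\A small, self-contained round-trip through these operations (assuming an HOL context,
+so that numerals and \<^verbatim>\nat\ are available): reading elaborates and type-checks,
+printing yields the surface syntax again:\
+ML\
+val t = Syntax.read_term @{context} "1 + (2::nat)";   (* a type-checked term *)
+val s = Syntax.string_of_term @{context} t;           (* its pretty-printed surface form *)
+\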
+text \The following operations are concerned with the conversion of pretty-prints
+and, from there, the generation of (non-layouted) strings.\
+ML\
+Syntax.pretty_term:Proof.context -> term -> Pretty.T;
+Syntax.pretty_typ:Proof.context -> typ -> Pretty.T;
+Syntax.pretty_sort:Proof.context -> sort -> Pretty.T;
+Syntax.pretty_classrel: Proof.context -> class list -> Pretty.T;
+Syntax.pretty_arity: Proof.context -> arity -> Pretty.T;
+Syntax.string_of_term: Proof.context -> term -> string;
+Syntax.string_of_typ: Proof.context -> typ -> string;
+Syntax.lookup_const : Syntax.syntax -> string -> string option;
+\
+
+ML\
+fun read_terms ctxt =
+  grouped 10 Par_List.map_independent (Syntax.parse_term ctxt) #> Syntax.check_terms ctxt;
+\
+
+ML\
+(* More High-level, more Isar-specific Parsers *)
+Args.name;
+Args.const;
+Args.cartouche_input: Input.source parser;
+Args.text_token: Token.T parser;
+
+val Z = let val attribute = Parse.position Parse.name --
+                            Scan.optional (Parse.$$$ "=" |-- Parse.!!! Parse.name) "";
+        in  (Scan.optional(Parse.$$$ "," |-- (Parse.enum "," attribute))) end ;
+(* this leads to constructions like the following, where a parser for the arguments of an
+   antiquotation is passed to its registration: *)
+
+
+
+Thy_Output.antiquotation_pretty_source \<^binding>\theory\ (Scan.lift (Parse.position Args.embedded));
+
+Thy_Output.antiquotation_raw \<^binding>\file\ (Scan.lift (Parse.position Parse.path)) ;
+
+fn name => (Thy_Output.antiquotation_pretty_source name (Scan.lift (Parse.position Args.cartouche_input)));
+\
+
+
+section\ The PIDE Framework \
+subsection\ Markup \
+text\ Markup operations and reporting (see also the diagram in the Isa\_DOF Foundations TR).
+  Markup operations send, via side-effects, annotations to the GUI (precisely:
+  to the PIDE Framework) that are used for hyperlinking to binding
+  occurrences, info for hovering, ... \
+ML\
+
+(* Position.report is also a type consisting of a pair of a position and markup. *)
+(* It would solve all my problems if I find a way to infer the defining Position.report
+   from a type definition occurrence ... *)
+
+Position.report: Position.T -> Markup.T -> unit;
+Position.reports: Position.report list -> unit;
+     (* ? ? ? I think this is the magic thing that sends reports to the GUI. *)
+Markup.entity : string -> string -> Markup.T;
+Markup.properties : Properties.T -> Markup.T -> Markup.T ;
+Properties.get : Properties.T -> string -> string option;
+Markup.enclose : Markup.T -> string -> string;
+
+(* example for setting a link, the def flag controls if it is a defining or a referring
+occurrence of an item *)
+fun theory_markup (def:bool) (name:string) (id:serial) (pos:Position.T) =
+  if id = 0 then Markup.empty
+  else
+    Markup.properties (Position.entity_properties_of def id pos)
+      (Markup.entity Markup.theoryN name);
+Markup.theoryN : string;
+
+serial();   (* A global, lock-guarded serial counter used to produce unique identifiers,
+               be it on the level of thy-internal states or as reference in markup in
+               PIDE *)
+
+(* From Theory:
+fun check ctxt (name, pos) =
+  let
+    val thy = Proof_Context.theory_of ctxt;
+    val thy' =
+      Context.get_theory thy name
+        handle ERROR msg =>
+          let
+            val completion =
+              Completion.make (name, pos)
+                (fn completed =>
+                  map Context.theory_name (ancestors_of thy)
+                  |> filter completed
+                  |> sort_strings
+                  |> map (fn a => (a, (Markup.theoryN, a))));
+            val report = Markup.markup_report (Completion.reported_text completion);
+          in error (msg ^ Position.here pos ^ report) end;
+    val _ = Context_Position.report ctxt pos (get_markup thy');
+  in thy' end;
+
+fun init_markup (name, pos) thy =
+  let
+    val id = serial ();
+    val _ = Position.report pos (theory_markup true name id pos);
+  in map_thy (fn (_, _, axioms, defs, wrappers) => (pos, id, axioms, defs, wrappers)) thy end;
+
+fun get_markup thy =
+  let val {pos, id, ...} = rep_theory thy
+  in theory_markup false (Context.theory_name thy) id pos end;
+
+
+*)
+
+\
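+text\A hedged sketch of how such a markup actually reaches the front-end: instantiating
+@{ML "theory_markup"} from the listing above with a fresh serial number (the entity name
+\<^verbatim>\Dummy_Thy\ is only illustrative) and reporting it at the current position:\
+ML\
+val _ = Position.report \<^here> (theory_markup true "Dummy_Thy" (serial ()) \<^here>);
+(* the front-end now treats this position as a defining occurrence of "Dummy_Thy" *)
+\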
+
+
+section \ Output: Very Low Level \
+ML\
+Output.output; (* output is the structure for the "hooks" with the target devices. *)
+Output.output "bla_1:";
+\
+
+section \ Output: LaTeX \
+
+
+ML\ open Thy_Output;
+
+ output_document: Proof.context -> {markdown: bool} -> Input.source -> Latex.text list;
+ output_token: Proof.context -> Token.T -> Latex.text list;
+ output_source: Proof.context -> string -> Latex.text list;
+ present_thy: Options.T -> theory -> segment list -> Latex.text list;
+ pretty_term: Proof.context -> term -> Pretty.T;
+ pretty_thm: Proof.context -> thm -> Pretty.T;
+ lines: Latex.text list -> Latex.text list;
+ items: Latex.text list -> Latex.text list;
+ isabelle: Proof.context -> Latex.text list -> Latex.text;
+ isabelle_typewriter: Proof.context -> Latex.text list -> Latex.text;
+ typewriter: Proof.context -> string -> Latex.text;
+ verbatim: Proof.context -> string -> Latex.text;
+ source: Proof.context -> {embedded: bool} -> Token.src -> Latex.text;
+ pretty: Proof.context -> Pretty.T -> Latex.text;
+ pretty_source: Proof.context -> {embedded: bool} -> Token.src -> Pretty.T -> Latex.text;
+ pretty_items: Proof.context -> Pretty.T list -> Latex.text;
+ pretty_items_source: Proof.context -> {embedded: bool} -> Token.src -> Pretty.T list -> Latex.text;
+ antiquotation_pretty:
+      binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory;
+ antiquotation_pretty_source:
+      binding -> 'a context_parser -> (Proof.context -> 'a -> Pretty.T) -> theory -> theory;
+ antiquotation_raw:
+      binding -> 'a context_parser -> (Proof.context -> 'a -> Latex.text) -> theory -> theory;
+ antiquotation_verbatim:
+      binding -> 'a context_parser -> (Proof.context -> 'a -> string) -> theory -> theory;
+
+\
+
+
+
+ML\
+Syntax_Phases.reports_of_scope;
+\
+
+(* STOP HERE JUNK ZONE :
+
+(* Pretty.T, pretty-operations. *)
+ML\
+
+(* interesting piece for LaTeX Generation:
+fun verbatim_text ctxt =
+  if Config.get ctxt display then
+    split_lines #> map (prefix (Symbol.spaces (Config.get ctxt indent))) #> cat_lines #>
+    Latex.output_ascii #> Latex.environment "isabellett"
+  else
+    split_lines #>
+    map (Latex.output_ascii #> enclose "\\isatt{" "}") #>
+    space_implode "\\isasep\\isanewline%\n";
+
+(* From structure Thy_Output *)
+fun pretty_const ctxt c =
+  let
+    val t = Const (c, Consts.type_scheme (Proof_Context.consts_of ctxt) c)
+      handle TYPE (msg, _, _) => error msg;
+    val ([t'], _) = Variable.import_terms true [t] ctxt;
+  in pretty_term ctxt t' end;
+
+  basic_entity @{binding const} (Args.const {proper = true, strict = false}) pretty_const #>
+
+*)
+
+Pretty.enclose : string -> string -> Pretty.T list -> Pretty.T; (* not to confuse with: String.enclose *)
+
+(* At times, checks were attached to Pretty - functions and exceptions used to
+   stop the execution/validation of a command *)
+fun pretty_theory ctxt (name, pos) = (Theory.check ctxt (name, pos); Pretty.str name);
+Pretty.enclose;
+Pretty.str;
+Pretty.mark_str;
+Pretty.text "bla_d";
+
+Pretty.quote; (* Pretty.T transformation adding " " *)
+Pretty.unformatted_string_of : Pretty.T -> string ;
+
+Latex.output_ascii;
+Latex.environment "isa" "bg";
+Latex.output_ascii "a_b:c'é";
+(* Note: *)
+space_implode "sd &e sf dfg" ["qs","er","alpa"];
+
+
+(*
+fun pretty_command (cmd as (name, Command {comment, ...})) =
+  Pretty.block
+    (Pretty.marks_str
+      ([Active.make_markup Markup.sendbackN {implicit = true, properties = [Markup.padding_line]},
+        command_markup false cmd], name) :: Pretty.str ":" :: Pretty.brk 2 :: Pretty.text comment);
+*)
+
+
+\
+
+ML\
+Thy_Output.output_text;
+(* is:
+fun output_text state {markdown} source =
+  let
+    val is_reported =
+      (case try Toplevel.context_of state of
+        SOME ctxt => Context_Position.is_visible ctxt
+      | NONE => true);
+
+    val pos = Input.pos_of source;
+    val syms = Input.source_explode source;
+
+    val _ =
+      if is_reported then
+        Position.report pos (Markup.language_document (Input.is_delimited source))
+      else ();
+
+    val output_antiquotes = map (eval_antiquote state) #> implode;
+
+    fun output_line line =
+      (if Markdown.line_is_item line then "\\item " else "") ^
+        output_antiquotes (Markdown.line_content line);
+
+    fun output_blocks blocks = space_implode "\n\n" (map output_block blocks)
+    and output_block (Markdown.Par lines) = cat_lines (map output_line lines)
+      | output_block (Markdown.List {kind, body, ...}) =
+          Latex.environment (Markdown.print_kind kind) (output_blocks body);
+  in
+    if Toplevel.is_skipped_proof state then ""
+    else if markdown andalso exists (Markdown.is_control o Symbol_Pos.symbol) syms
+    then
+      let
+        val ants = Antiquote.parse pos syms;
+        val reports = Antiquote.antiq_reports ants;
+        val blocks = Markdown.read_antiquotes ants;
+        val _ = if is_reported then Position.reports (reports @ Markdown.reports blocks) else ();
+      in output_blocks blocks end
+    else
+      let
+        val ants = Antiquote.parse pos (Symbol_Pos.trim_blanks syms);
+        val reports = Antiquote.antiq_reports ants;
+        val _ = if is_reported then Position.reports (reports @ Markdown.text_reports ants) else ();
+      in output_antiquotes ants end
+  end;
+*)
+
+\
+
+
+ML\
+Outer_Syntax.print_commands @{theory};
+
+Outer_Syntax.command : Outer_Syntax.command_keyword -> string ->
+    (Toplevel.transition -> Toplevel.transition) parser -> unit;
+(* creates an implicit thy_setup with an entry for a call-back function, which happens
+   to be a parser that must have as side-effect a Toplevel-transition-transition.
+   Registers "Toplevel.transition -> Toplevel.transition" parsers to the Isar interpreter.
+  *)
+
+(*Example: text is :
+
+val _ =
+  Outer_Syntax.command ("text", @{here}) "formal comment (primary style)"
+    (Parse.opt_target -- Parse.document_source >> Thy_Output.document_command {markdown = true});
+*)
+
+(* not exported: Thy_Output.output_token; I think that's where it happens ... *)
+Thy_Output.present_thy;
+\
+
+text\ Even the parsers and type checkers stemming from the theory-structure are registered via
+hooks (this can be confusing at times). Main phases of inner syntax processing, with standard
+implementations of parse/unparse operations, are treated this way.
+At the very end in @{file "~~/src/Pure/Syntax/syntax_phases.ML"}, it sets up the entire syntax engine
+(the hooks) via:
+\
+
+
+(*
+val _ =
+  Theory.setup
+   (Syntax.install_operations
+     {parse_sort = parse_sort,
+      parse_typ = parse_typ,
+      parse_term = parse_term false,
+      parse_prop = parse_term true,
+      unparse_sort = unparse_sort,
+      unparse_typ = unparse_typ,
+      unparse_term = unparse_term,
+      check_typs = check_typs,
+      check_terms = check_terms,
+      check_props = check_props,
+      uncheck_typs = uncheck_typs,
+      uncheck_terms = uncheck_terms});
+*)
+
+text\ Thus, Syntax\_Phases does the actual work, including
+ markup generation and generation of reports.
+Look at: \
+(*
+fun check_typs ctxt raw_tys =
+  let
+    val (sorting_report, tys) = Proof_Context.prepare_sortsT ctxt raw_tys;
+    val _ = if Context_Position.is_visible ctxt then Output.report sorting_report else ();
+  in
+    tys
+    |> apply_typ_check ctxt
+    |> Term_Sharing.typs (Proof_Context.theory_of ctxt)
+  end;
+
+which is the real implementation behind Syntax.check_typ
+
+or:
+
+fun check_terms ctxt raw_ts =
+  let
+    val (sorting_report, raw_ts') = Proof_Context.prepare_sorts ctxt raw_ts;
+    val (ts, ps) = Type_Infer_Context.prepare_positions ctxt raw_ts';
+
+    val tys = map (Logic.mk_type o snd) ps;
+    val (ts', tys') = ts @ tys
+      |> apply_term_check ctxt
+      |> chop (length ts);
+    val typing_report =
+      fold2 (fn (pos, _) => fn ty =>
+        if Position.is_reported pos then
+          cons (Position.reported_text pos Markup.typing
+            (Syntax.string_of_typ ctxt (Logic.dest_type ty)))
+        else I) ps tys' [];
+
+    val _ =
+      if Context_Position.is_visible ctxt then Output.report (sorting_report @ typing_report)
+      else ();
+  in Term_Sharing.terms (Proof_Context.theory_of ctxt) ts' end;
+
+which is the real implementation behind Syntax.check_term
+
+As one can see, check-routines internally generate the markup.
+
+*)
+
+
+
+Consts.the_const; (* T is a kind of signature ... *)
+Variable.import_terms;
+Vartab.update;
+
+fun control_antiquotation name s1 s2 =
+  Thy_Output.antiquotation name (Scan.lift Args.cartouche_input)
+    (fn {state, ...} => enclose s1 s2 o Thy_Output.output_text state {markdown = false});
+
+Output.output;
+
+Syntax.read_input ;
+Input.source_content;
+
+(*
+  basic_entity @{binding const} (Args.const {proper = true, strict = false}) pretty_const #>
+*)
+
+
+
+
+chapter\LaTeX Document Generation\
+text\MORE TO COME\
+
+
+ML\ Thy_Output.document_command {markdown = true} \
+(* Structures related to LaTeX Generation *)
+ML\ Latex.output_ascii;
+
+    Latex.output_token
+(* Hm, generated output for
+subsection*[Shaft_Encoder_characteristics]{ * Shaft Encoder Characteristics * } :
+
+\begin{isamarkuptext}%
+\isa{{\isacharbackslash}label{\isacharbraceleft}general{\isacharunderscore}hyps{\isacharbraceright}}%
+\end{isamarkuptext}\isamarkuptrue%
+\isacommand{subsection{\isacharasterisk}}\isamarkupfalse%
+{\isacharbrackleft}Shaft{\isacharunderscore}Encoder{\isacharunderscore}characteristics{\isacharbrackright}{\isacharverbatimopen}\ Shaft\ Encoder\ Characteristics\ {\isacharverbatimclose}%
+
+Generated output for: text\\label{sec:Shaft-Encoder-characteristics}\
+
+\begin{isamarkuptext}%
+\label{sec:Shaft-Encoder-characteristics}%
+\end{isamarkuptext}\isamarkuptrue%
+
+
+*)
+
+
+\
+
+ML\
+Thy_Output.maybe_pretty_source :
+   (Proof.context -> 'a -> Pretty.T) -> Proof.context -> Token.src -> 'a list -> Pretty.T list;
+
+Thy_Output.output: Proof.context -> Pretty.T list -> string;
+
+  (* nothing special *)
+
+fun document_antiq check_file ctxt (name, pos) =
+  let
+    (* val dir = master_directory (Proof_Context.theory_of ctxt); *)
+    (* val _ = check_path check_file ctxt dir (name, pos); *)
+  in
+    space_explode "/" name
+    |> map Latex.output_ascii
+    |> space_implode (Latex.output_ascii "/" ^ "\\discretionary{}{}{}")
+    |> enclose "\\isatt{" "}"
+  end;
+
+\
+
+ML\
+
+Thy_Output.output_text: Toplevel.state -> {markdown: bool} -> Input.source -> string;
+Thy_Output.document_command;
+
+Thy_Output.document_command : {markdown: bool} -> (xstring * Position.T) option * Input.source ->
+                              Toplevel.transition -> Toplevel.transition;
+  (* fun document_command markdown (loc, txt) =
+       Toplevel.keep (fn state =>
+         (case loc of
+           NONE => ignore (output_text state markdown txt)
+         | SOME (_, pos) =>
+             error ("Illegal target specification -- not a theory context" ^ Position.here pos))) o
+       Toplevel.present_local_theory loc (fn state => ignore (output_text state markdown txt));
+
+  *)
+
+Thy_Output.output_text : Toplevel.state -> {markdown: bool} -> Input.source -> string ;
+  (* this is where antiquotation expansion happens : uses eval_antiquote *)
+
+*)
+(* stuff over global environment : *)
+(*
+ML\ Document.state();\
+ML\ Session.get_keywords(); (* this looks to be really session global. *)
+    Outer_Syntax.command;  \
+ML\ Thy_Header.get_keywords @{theory};(* this looks to be really theory global. *) \
+*)
+
+
+section\Inner Syntax\
+text\MORE TO COME\
+ML\Sign.add_trrules\
+
+
+section*[c::conclusion]\Conclusion\
+text\More to come\
+section*[bib::bibliography]\Bibliography\
+
+(*<*)
+close_monitor*[this]
+check_doc_global
+(*>*)
+end
diff --git a/examples/technical_report/TR_my_commented_isabelle/document/root.bib b/examples/technical_report/TR_my_commented_isabelle/document/root.bib
new file mode 100644
index 0000000..4eca4be
--- /dev/null
+++ b/examples/technical_report/TR_my_commented_isabelle/document/root.bib
@@ -0,0 +1,528 @@
+@misc{bockenek:hal-02069705,
+  TITLE = {{Using Isabelle/UTP for the Verification of Sorting Algorithms A Case Study}},
+  AUTHOR = {Bockenek, Joshua A and Lammich, Peter and Nemouchi, Yakoub and Wolff, Burkhart},
+  URL = {https://easychair.org/publications/preprint/CxRV},
+  NOTE = {Isabelle Workshop 2018, Colocated with Interactive Theorem Proving. As part of FLOC 2018, Oxford, GB.},
+  YEAR = {2018},
+  MONTH = Jul
+}
+
+@book{DBLP:books/sp/NipkowPW02,
+  author    = {Tobias Nipkow and
+               Lawrence C. 
Paulson and + Markus Wenzel}, + title = {Isabelle/HOL - {A} Proof Assistant for Higher-Order Logic}, + series = {Lecture Notes in Computer Science}, + volume = {2283}, + publisher = {Springer}, + year = {2002}, + url = {https://doi.org/10.1007/3-540-45949-9}, + deactivated_doi = {10.1007/3-540-45949-9}, + isbn = {3-540-43376-7}, + timestamp = {Tue, 14 May 2019 10:00:35 +0200}, + biburl = {https://dblp.org/rec/bib/books/sp/NipkowPW02}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/sosp/KleinEHACDEEKNSTW09, + author = {Gerwin Klein and + Kevin Elphinstone and + Gernot Heiser and + June Andronick and + David Cock and + Philip Derrin and + Dhammika Elkaduwe and + Kai Engelhardt and + Rafal Kolanski and + Michael Norrish and + Thomas Sewell and + Harvey Tuch and + Simon Winwood}, + title = {seL4: formal verification of an {OS} kernel}, + deactivated_booktitle = {Proceedings of the 22nd {ACM} Symposium on Operating Systems Principles + 2009, {SOSP} 2009, Big Sky, Montana, USA, October 11-14, 2009}, + pages = {207--220}, + year = {2009}, + crossref = {DBLP:conf/sosp/2009}, + url = {https://doi.org/10.1145/1629575.1629596}, + deactivated_doi = {10.1145/1629575.1629596}, + timestamp = {Tue, 06 Nov 2018 16:59:32 +0100}, + biburl = {https://dblp.org/rec/bib/conf/sosp/KleinEHACDEEKNSTW09}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/sosp/2009, + editor = {Jeanna Neefe Matthews and + Thomas E. Anderson}, + title = {Proceedings of the 22nd {ACM} Symposium on Operating Systems Principles + 2009, {SOSP} 2009, Big Sky, Montana, USA, October 11-14, 2009}, + publisher = {{ACM}}, + year = {2009}, + url = {https://doi.org/10.1145/1629575}, + deactivated_doi = {10.1145/1629575}, + isbn = {978-1-60558-752-3}, + timestamp = {Tue, 06 Nov 2018 16:59:32 +0100}, + biburl = {https://dblp.org/rec/bib/conf/sosp/2009}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/tphol/CohenDHLMSST09, + author = {Ernie Cohen and + Markus Dahlweid and + Mark A. Hillebrand and + Dirk Leinenbach and + Michal Moskal and + Thomas Santen and + Wolfram Schulte and + Stephan Tobies}, + title = {{VCC:} {A} Practical System for Verifying Concurrent {C}}, + deactivated_booktitle = {Theorem Proving in Higher Order Logics, 22nd International Conference, + TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings}, + pages = {23--42}, + year = {2009}, + url = {https://doi.org/10.1007/978-3-642-03359-9_2}, + deactivated_doi = {10.1007/978-3-642-03359-9_2}, + timestamp = {Tue, 23 May 2017 01:12:08 +0200}, + biburl = {https://dblp.org/rec/bib/conf/tphol/CohenDHLMSST09}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/cacm/Leroy09, + author = {Xavier Leroy}, + title = {Formal verification of a realistic compiler}, + journal = {Commun. 
{ACM}}, + volume = {52}, + number = {7}, + pages = {107--115}, + year = {2009}, + url = {http://doi.acm.org/10.1145/1538788.1538814}, + deactivated_doi = {10.1145/1538788.1538814}, + timestamp = {Thu, 02 Jul 2009 13:36:32 +0200}, + biburl = {https://dblp.org/rec/bib/journals/cacm/Leroy09}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/itp/Wenzel14, + author = {Makarius Wenzel}, + title = {Asynchronous User Interaction and Tool Integration in Isabelle/PIDE}, + deactivated_booktitle = {Interactive Theorem Proving - 5th International Conference, {ITP} + 2014, Held as Part of the Vienna Summer of Logic, {VSL} 2014, Vienna, + Austria, July 14-17, 2014. Proceedings}, + pages = {515--530}, + year = {2014}, + crossref = {DBLP:conf/itp/2014}, + url = {https://doi.org/10.1007/978-3-319-08970-6\_33}, + deactivated_doi = {10.1007/978-3-319-08970-6\_33}, + timestamp = {Tue, 14 May 2019 10:00:37 +0200}, + biburl = {https://dblp.org/rec/bib/conf/itp/Wenzel14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/itp/2014, + editor = {Gerwin Klein and + Ruben Gamboa}, + title = {Interactive Theorem Proving - 5th International Conference, {ITP} + 2014, Held as Part of the Vienna Summer of Logic, {VSL} 2014, Vienna, + Austria, July 14-17, 2014. Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {8558}, + publisher = {Springer}, + year = {2014}, + url = {https://doi.org/10.1007/978-3-319-08970-6}, + deactivated_doi = {10.1007/978-3-319-08970-6}, + isbn = {978-3-319-08969-0}, + timestamp = {Tue, 14 May 2019 10:00:37 +0200}, + biburl = {https://dblp.org/rec/bib/conf/itp/2014}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:journals/corr/Wenzel14, + author = {Makarius Wenzel}, + title = {System description: Isabelle/jEdit in 2014}, + deactivated_booktitle = {Proceedings Eleventh Workshop on User Interfaces for Theorem Provers, + {UITP} 2014, Vienna, Austria, 17th July 2014.}, + pages = {84--94}, + year = {2014}, + crossref = {DBLP:journals/corr/BenzmullerP14}, + url = {https://doi.org/10.4204/EPTCS.167.10}, + deactivated_doi = {10.4204/EPTCS.167.10}, + timestamp = {Wed, 12 Sep 2018 01:05:15 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/Wenzel14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:journals/corr/BenzmullerP14, + editor = {Christoph Benzm{\"{u}}ller and + Bruno {Woltzenlogel Paleo}}, + title = {Proceedings Eleventh Workshop on User Interfaces for Theorem Provers, + {UITP} 2014, Vienna, Austria, 17th July 2014}, + series = {{EPTCS}}, + volume = {167}, + year = {2014}, + url = {https://doi.org/10.4204/EPTCS.167}, + deactivated_doi = {10.4204/EPTCS.167}, + timestamp = {Wed, 12 Sep 2018 01:05:15 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/BenzmullerP14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/mkm/BarrasGHRTWW13, + author = {Bruno Barras and + Lourdes Del Carmen Gonz{\'{a}}lez{-}Huesca and + Hugo Herbelin and + Yann R{\'{e}}gis{-}Gianas and + Enrico Tassi and + Makarius Wenzel and + Burkhart Wolff}, + title = {Pervasive Parallelism in Highly-Trustable Interactive Theorem Proving + Systems}, + deactivated_booktitle = {Intelligent Computer Mathematics - MKM, Calculemus, DML, and Systems + and Projects 2013, Held as Part of {CICM} 2013, Bath, UK, July 8-12, + 2013. 
Proceedings}, + pages = {359--363}, + year = {2013}, + crossref = {DBLP:conf/mkm/2013}, + url = {https://doi.org/10.1007/978-3-642-39320-4\_29}, + deactivated_doi = {10.1007/978-3-642-39320-4\_29}, + timestamp = {Sun, 02 Jun 2019 21:17:34 +0200}, + biburl = {https://dblp.org/rec/bib/conf/mkm/BarrasGHRTWW13}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/mkm/2013, + editor = {Jacques Carette and + David Aspinall and + Christoph Lange and + Petr Sojka and + Wolfgang Windsteiger}, + title = {Intelligent Computer Mathematics - MKM, Calculemus, DML, and Systems + and Projects 2013, Held as Part of {CICM} 2013, Bath, UK, July 8-12, + 2013. Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {7961}, + publisher = {Springer}, + year = {2013}, + url = {https://doi.org/10.1007/978-3-642-39320-4}, + deactivated_doi = {10.1007/978-3-642-39320-4}, + isbn = {978-3-642-39319-8}, + timestamp = {Sun, 02 Jun 2019 21:17:34 +0200}, + biburl = {https://dblp.org/rec/bib/conf/mkm/2013}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/afp/LammichW19, + author = {Peter Lammich and + Simon Wimmer}, + title = {{IMP2} - Simple Program Verification in Isabelle/HOL}, + journal = {Archive of Formal Proofs}, + volume = {2019}, + year = {2019}, + url = {https://www.isa-afp.org/entries/IMP2.html}, + timestamp = {Mon, 20 May 2019 11:45:07 +0200}, + biburl = {https://dblp.org/rec/bib/journals/afp/LammichW19}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@misc{frama-c-home-page, + title = {The Frama-C Home Page}, + author = {CEA LIST}, + year = 2019, + month = jan, + day = 10, + url = {https://frama-c.com}, + note = {Accessed \DTMdate{2019-03-24}} +} + +@inproceedings{DBLP:conf/fm/LeinenbachS09, + author = {Dirk Leinenbach and Thomas Santen}, + title = {Verifying the Microsoft Hyper-V Hypervisor with {VCC}}, + deactivated_booktitle = {{FM} 2009: Formal Methods, Second World Congress, Eindhoven, The Netherlands, + November 2-6, 2009. 
Proceedings}, + pages = {806--809}, + year = {2009}, + url = {https://doi.org/10.1007/978-3-642-05089-3_51}, + deactivated_doi = {10.1007/978-3-642-05089-3_51}, + timestamp = {Mon, 22 May 2017 17:11:19 +0200}, + biburl = {https://dblp.org/rec/bib/conf/fm/LeinenbachS09}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/tap/Keller18, + author = {Chantal Keller}, + title = {Tactic Program-Based Testing and Bounded Verification in Isabelle/HOL}, + deactivated_booktitle = {Tests and Proofs - 12th International Conference, {TAP} 2018, Held + as Part of {STAF} 2018, Toulouse, France, June 27-29, 2018, Proceedings}, + pages = {103--119}, + year = {2018}, + url = {https://doi.org/10.1007/978-3-319-92994-1\_6}, + deactivated_doi = {10.1007/978-3-319-92994-1\_6}, + timestamp = {Mon, 18 Jun 2018 13:57:50 +0200}, + biburl = {https://dblp.org/rec/bib/conf/tap/Keller18}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/itp/AissatVW16, + author = {Romain A{\"{\i}}ssat and + Fr{\'{e}}d{\'{e}}ric Voisin and + Burkhart Wolff}, + title = {Infeasible Paths Elimination by Symbolic Execution Techniques - Proof + of Correctness and Preservation of Paths}, + deactivated_booktitle = {Interactive Theorem Proving - 7th International Conference, {ITP} + 2016, Nancy, France, August 22-25, 2016, Proceedings}, + pages = {36--51}, + year = {2016}, + url = {https://doi.org/10.1007/978-3-319-43144-4\_3}, + deactivated_doi = {10.1007/978-3-319-43144-4\_3}, + timestamp = {Thu, 17 Aug 2017 16:22:01 +0200}, + biburl = {https://dblp.org/rec/bib/conf/itp/AissatVW16}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/tocs/KleinAEMSKH14, + author = {Gerwin Klein and + June Andronick and + Kevin Elphinstone and + Toby C. Murray and + Thomas Sewell and + Rafal Kolanski and + Gernot Heiser}, + title = {Comprehensive formal verification of an {OS} microkernel}, + journal = {{ACM} Trans. Comput. Syst.}, + volume = {32}, + number = {1}, + pages = {2:1--2:70}, + year = {2014}, + url = {http://doi.acm.org/10.1145/2560537}, + deactivated_doi = {10.1145/2560537}, + timestamp = {Tue, 03 Jan 2017 11:51:57 +0100}, + biburl = {https://dblp.org/rec/bib/journals/tocs/KleinAEMSKH14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/pldi/GreenawayLAK14, + author = {David Greenaway and + Japheth Lim and + June Andronick and + Gerwin Klein}, + title = {Don't sweat the small stuff: formal verification of {C} code without + the pain}, + deactivated_booktitle = {{ACM} {SIGPLAN} Conference on Programming Language Design and Implementation, + {PLDI} '14, Edinburgh, United Kingdom - June 09 - 11, 2014}, + pages = {429--439}, + year = {2014}, + url = {http://doi.acm.org/10.1145/2594291.2594296}, + deactivated_doi = {10.1145/2594291.2594296}, + timestamp = {Tue, 20 Dec 2016 10:12:01 +0100}, + biburl = {https://dblp.org/rec/bib/conf/pldi/GreenawayLAK14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/mkm/BruckerACW18, + author = {Achim D. 
Brucker and + Idir A{\"{\i}}t{-}Sadoune and + Paolo Crisafulli and + Burkhart Wolff}, + title = {Using the Isabelle Ontology Framework - Linking the Formal with the + Informal}, + deactivated_booktitle = {Intelligent Computer Mathematics - 11th International Conference, + {CICM} 2018, Hagenberg, Austria, August 13-17, 2018, Proceedings}, + pages = {23--38}, + year = {2018}, + url = {https://doi.org/10.1007/978-3-319-96812-4\_3}, + deactivated_doi = {10.1007/978-3-319-96812-4\_3}, + timestamp = {Sat, 11 Aug 2018 00:57:41 +0200}, + biburl = {https://dblp.org/rec/bib/conf/mkm/BruckerACW18}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/afp/TuongW15, + author = {Fr{\'{e}}d{\'{e}}ric Tuong and + Burkhart Wolff}, + title = {A Meta-Model for the Isabelle {API}}, + journal = {Archive of Formal Proofs}, + volume = {2015}, + year = {2015}, + url = {https://www.isa-afp.org/entries/Isabelle\_Meta\_Model.shtml}, + timestamp = {Mon, 07 Jan 2019 11:16:33 +0100}, + biburl = {https://dblp.org/rec/bib/journals/afp/TuongW15}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/tphol/WinwoodKSACN09, + author = {Simon Winwood and + Gerwin Klein and + Thomas Sewell and + June Andronick and + David Cock and + Michael Norrish}, + title = {Mind the Gap}, + deactivated_booktitle = {Theorem Proving in Higher Order Logics, 22nd International Conference, + TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings}, + pages = {500--515}, + year = {2009}, + crossref = {DBLP:conf/tphol/2009}, + url = {https://doi.org/10.1007/978-3-642-03359-9\_34}, + deactivated_doi = {10.1007/978-3-642-03359-9\_34}, + timestamp = {Fri, 02 Nov 2018 09:49:05 +0100}, + biburl = {https://dblp.org/rec/bib/conf/tphol/WinwoodKSACN09}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/tphol/2009, + editor = {Stefan Berghofer and + Tobias Nipkow and + Christian Urban and + Makarius Wenzel}, + title = {Theorem Proving in Higher Order Logics, 22nd International Conference, + TPHOLs 2009, Munich, Germany, August 17-20, 2009. Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {5674}, + publisher = {Springer}, + year = {2009}, + url = {https://doi.org/10.1007/978-3-642-03359-9}, + deactivated_doi = {10.1007/978-3-642-03359-9}, + isbn = {978-3-642-03358-2}, + timestamp = {Tue, 23 May 2017 01:12:08 +0200}, + biburl = {https://dblp.org/rec/bib/conf/tphol/2009}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/afp/BruckerTW14, + author = {Achim D. 
Brucker and + Fr{\'{e}}d{\'{e}}ric Tuong and + Burkhart Wolff}, + title = {Featherweight {OCL:} {A} Proposal for a Machine-Checked Formal Semantics + for {OCL} 2.5}, + journal = {Archive of Formal Proofs}, + volume = {2014}, + year = {2014}, + url = {https://www.isa-afp.org/entries/Featherweight\_OCL.shtml}, + timestamp = {Mon, 07 Jan 2019 11:16:33 +0100}, + biburl = {https://dblp.org/rec/bib/journals/afp/BruckerTW14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/tacas/SananZHZTL17, + author = {David San{\'{a}}n and + Yongwang Zhao and + Zhe Hou and + Fuyuan Zhang and + Alwen Tiu and + Yang Liu}, + title = {CSimpl: {A} Rely-Guarantee-Based Framework for Verifying Concurrent + Programs}, + deactivated_booktitle = {Tools and Algorithms for the Construction and Analysis of Systems + - 23rd International Conference, {TACAS} 2017, Held as Part of the + European Joint Conferences on Theory and Practice of Software, {ETAPS} + 2017, Uppsala, Sweden, April 22-29, 2017, Proceedings, Part {I}}, + pages = {481--498}, + year = {2017}, + crossref = {DBLP:conf/tacas/2017-1}, + url = {https://doi.org/10.1007/978-3-662-54577-5\_28}, + deactivated_doi = {10.1007/978-3-662-54577-5\_28}, + timestamp = {Mon, 18 Sep 2017 08:38:37 +0200}, + biburl = {https://dblp.org/rec/bib/conf/tacas/SananZHZTL17}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/tacas/2017-1, + editor = {Axel Legay and + Tiziana Margaria}, + title = {Tools and Algorithms for the Construction and Analysis of Systems + - 23rd International Conference, {TACAS} 2017, Held as Part of the + European Joint Conferences on Theory and Practice of Software, {ETAPS} + 2017, Uppsala, Sweden, April 22-29, 2017, Proceedings, Part {I}}, + series = {Lecture Notes in Computer Science}, + volume = {10205}, + year = {2017}, + url = {https://doi.org/10.1007/978-3-662-54577-5}, + deactivated_doi = {10.1007/978-3-662-54577-5}, + isbn = {978-3-662-54576-8}, + timestamp = {Wed, 24 May 2017 08:28:32 +0200}, + biburl = {https://dblp.org/rec/bib/conf/tacas/2017-1}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{DBLP:conf/itp/HouSTL17, + author = {Zhe Hou and + David San{\'{a}}n and + Alwen Tiu and + Yang Liu}, + title = {Proof Tactics for Assertions in Separation Logic}, + deactivated_booktitle = {Interactive Theorem Proving - 8th International Conference, {ITP} + 2017, Bras{\'{\i}}lia, Brazil, September 26-29, 2017, Proceedings}, + pages = {285--303}, + year = {2017}, + crossref = {DBLP:conf/itp/2017}, + url = {https://doi.org/10.1007/978-3-319-66107-0\_19}, + deactivated_doi = {10.1007/978-3-319-66107-0\_19}, + timestamp = {Mon, 18 Sep 2017 08:38:37 +0200}, + biburl = {https://dblp.org/rec/bib/conf/itp/HouSTL17}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@proceedings{DBLP:conf/itp/2017, + editor = {Mauricio Ayala{-}Rinc{\'{o}}n and + C{\'{e}}sar A. 
Mu{\~{n}}oz}, + title = {Interactive Theorem Proving - 8th International Conference, {ITP} + 2017, Bras{\'{\i}}lia, Brazil, September 26-29, 2017, Proceedings}, + series = {Lecture Notes in Computer Science}, + volume = {10499}, + publisher = {Springer}, + year = {2017}, + url = {https://doi.org/10.1007/978-3-319-66107-0}, + deactivated_doi = {10.1007/978-3-319-66107-0}, + isbn = {978-3-319-66106-3}, + timestamp = {Wed, 06 Sep 2017 14:53:52 +0200}, + biburl = {https://dblp.org/rec/bib/conf/itp/2017}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/sigbed/CarrascosaCMBC14, + author = {E. Carrascosa and + Javier Coronel and + Miguel Masmano and + Patricia Balbastre and + Alfons Crespo}, + title = {XtratuM hypervisor redesign for {LEON4} multicore processor}, + journal = {{SIGBED} Review}, + volume = {11}, + number = {2}, + pages = {27--31}, + year = {2014}, + url = {https://doi.org/10.1145/2668138.2668142}, + deactivated_doi = {10.1145/2668138.2668142}, + timestamp = {Tue, 06 Nov 2018 12:51:31 +0100}, + biburl = {https://dblp.org/rec/bib/journals/sigbed/CarrascosaCMBC14}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/cacm/Earley70, + author = {Jay Earley}, + title = {An Efficient Context-Free Parsing Algorithm}, + journal = {Commun. {ACM}}, + volume = {13}, + number = {2}, + pages = {94--102}, + year = {1970}, + url = {https://doi.org/10.1145/362007.362035}, + deactivated_doi = {10.1145/362007.362035}, + timestamp = {Wed, 14 Nov 2018 10:22:30 +0100}, + biburl = {https://dblp.org/rec/bib/journals/cacm/Earley70}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@article{DBLP:journals/jfp/Hutton92, + author = {Graham Hutton}, + title = {Higher-Order Functions for Parsing}, + journal = {J. Funct. Program.}, + volume = {2}, + number = {3}, + pages = {323--343}, + year = {1992}, + url = {https://doi.org/10.1017/S0956796800000411}, + deactivated_doi = {10.1017/S0956796800000411}, + timestamp = {Sat, 27 May 2017 14:24:34 +0200}, + biburl = {https://dblp.org/rec/bib/journals/jfp/Hutton92}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} \ No newline at end of file