c-parser: update to cartouches

Gerwin Klein 2019-05-22 12:20:53 +10:00 committed by Matthew Brecknell
parent 26fdedad4d
commit dadcd8f65b
32 changed files with 164 additions and 164 deletions
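The change applied throughout these files is syntactic: old-style {* ... *} delimiters around embedded ML, setup, text, section and tactic blocks become Isabelle cartouches, written \<open> ... \<close> in ASCII, and back-quoted fact references such as `wf_fd s` become \<open>wf_fd s\<close>. A minimal before/after sketch of the pattern, assembled from fragments that appear in the diffs below (e.g. PackedTypes.thy and the test theories), for orientation only:

  (* old syntax *)
  text {* field_access is independent of the underlying bytes *}
  ML {* Feedback.verbosity_level := 6 *}
  apply (tactic {* Record.split_tac @{context} 1 *})

  (* new cartouche syntax *)
  text \<open>field_access is independent of the underlying bytes\<close>
  ML \<open>Feedback.verbosity_level := 6\<close>
  apply (tactic \<open>Record.split_tac @{context} 1\<close>)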

View File

@ -41,9 +41,9 @@ ML_file "name_generation.ML"
(* set up hoare package to rewrite state updates more *)
setup {*
setup \<open>
Hoare.add_foldcongsimps [@{thm "update_update"}, @{thm "o_def"}]
*}
\<close>
(* Syntax for apply antiquotation parsing explicitly *)
@ -71,7 +71,7 @@ definition hrs_id :: "heap_raw_state \<Rightarrow> heap_raw_state" where
declare hrs_id_def [simp add]
parse_translation {*
parse_translation \<open>
let
fun ac x = Syntax.const "_antiquoteCur" $ Syntax.const x
fun aco x y = Syntax.const y $ (Syntax.const "globals" $ x)
@ -90,11 +90,11 @@ let
fun heap_assert_tr [b] = repl b
| heap_assert_tr ts = raise TERM ("heap_assert_tr", ts);
in [("_heap",K heap_assert_tr)] end;
*}
\<close>
(* Separation logic assertion parse translation *)
parse_translation {*
parse_translation \<open>
let
fun ac x = Syntax.const "_antiquoteCur" $ Syntax.const x
fun aco x y = Syntax.const y $ (Syntax.const "globals" $ x)
@ -121,7 +121,7 @@ let
fun sep_tr [t] = Syntax.const "sep_app" $ (*new_heap *) t $ hd ac
| sep_tr ts = raise TERM ("sep_tr", ts);
in [("_sep_assert",K sep_tr)] end;
*}
\<close>
definition c_null_guard :: "'a::c_type ptr_guard" where
@ -280,7 +280,7 @@ syntax (output)
"_Deref" :: "'b \<Rightarrow> 'b" ("*_" [1000] 1000)
"_AssignH" :: "'b => 'b => ('a,'p,'f) com" ("(2*_ :==/ _)" [30, 30] 23)
print_translation {*
print_translation \<open>
let
fun deref (Const ("_antiquoteCur",_)$Const (h,_)) p =
if h=NameGeneration.global_heap then Syntax.const "_Deref" $ p else
@ -301,14 +301,14 @@ let
fun assign_tr [l,r] = deref_assign l r
| assign_tr ts = raise Match
in [("CTypesDefs.lift",K lift_tr),("_Assign",K assign_tr)] end;
*}
\<close>
print_translation {*
print_translation \<open>
let
fun sep_app_tr [l,r] = Syntax.const "_sep_assert" $ l
| sep_app_tr ts = raise Match
in [("sep_app",K sep_app_tr)] end;
*}
\<close>
syntax "_h_t_valid" :: "'a::c_type ptr \<Rightarrow> bool" ("\<Turnstile>\<^sub>t _" [99] 100)

View File

@ -12,9 +12,9 @@ theory PackedTypes
imports "Word_Lib.WordSetup" CProof
begin
section {* Underlying definitions for the class axioms *}
section \<open>Underlying definitions for the class axioms\<close>
text {* field_access / field_update is the identity for packed types *}
text \<open>field_access / field_update is the identity for packed types\<close>
definition
"fa_fu_idem fd n \<equiv>
@ -39,7 +39,7 @@ where
lemmas td_fafu_idem_simps = fai0 fai1 fai2 fai3 fai4 fai5
text {* field_access is independent of the underlying bytes *}
text \<open>field_access is independent of the underlying bytes\<close>
definition
"fa_heap_indep fd n \<equiv>
@ -64,7 +64,7 @@ where
lemmas td_fa_hi_simps = fahi0 fahi1 fahi2 fahi3 fahi4 fahi5
section {* Lemmas about td_fafu_idem *}
section \<open>Lemmas about td_fafu_idem\<close>
lemma field_lookup_td_fafu_idem:
shows "\<And>(s :: 'a field_desc typ_desc) f m n. \<lbrakk> field_lookup t f m = Some (s, n); td_fafu_idem t \<rbrakk> \<Longrightarrow> td_fafu_idem s"
@ -225,7 +225,7 @@ next
by (rule field_lookup_offset2_list [where m = 0, simplified])
show "access_ti_list ts' (update_ti s bs v) (drop (size_td (dt_fst p')) bs') ! (x - size_td (dt_fst p')) = bs ! (x - n)"
using mlt nlex xln lbs lbs' wf wfts `td_fafu_idem s` `wf_fd s`
using mlt nlex xln lbs lbs' wf wfts \<open>td_fafu_idem s\<close> \<open>wf_fd s\<close>
by (simp add: Cons_typ_desc.hyps(2) [OF fl'] size_td_pair_dt_fst)
qed
}
@ -244,13 +244,13 @@ next
apply simp
done
hence ?case using wf lbs lbs' nlex xln wf wfts `td_fafu_idem s` `wf_fd s`
hence ?case using wf lbs lbs' nlex xln wf wfts \<open>td_fafu_idem s\<close> \<open>wf_fd s\<close>
by (simp add: nth_append length_fa_ti access_ti_pair_dt_fst size_td_pair_dt_fst ih[OF fl])
}
ultimately show ?case using `field_lookup_list (p' # ts') f 0 = Some (s, n)` by (simp split: option.splits)
ultimately show ?case using \<open>field_lookup_list (p' # ts') f 0 = Some (s, n)\<close> by (simp split: option.splits)
qed (clarsimp split: if_split_asm)+
subsection {* td_fa_hi *}
subsection \<open>td_fa_hi\<close>
(* \<lbrakk> size_of TYPE('a::mem_type) \<le> length h; size_of TYPE('a) \<le> length h' \<rbrakk> \<Longrightarrow> *)
@ -302,9 +302,9 @@ next
by simp (erule (2) DTPair_typ_desc.hyps)
qed
section {* Simp rules for deriving packed props from the type combinators *}
section \<open>Simp rules for deriving packed props from the type combinators\<close>
subsection {* td_fafu_idem *}
subsection \<open>td_fafu_idem\<close>
lemma td_fafu_idem_final_pad:
"padup (2 ^ align_td t) (size_td t) = 0
@ -455,7 +455,7 @@ lemma td_fafu_idem_empty_typ_info:
unfolding empty_typ_info_def
by simp
subsection {* td_fa_hi *}
subsection \<open>td_fa_hi\<close>
(* These are mostly identical to the above --- surely there is something which implies both? *)
@ -586,11 +586,11 @@ lemma td_fa_hi_empty_typ_info:
unfolding empty_typ_info_def
by simp
section {* The type class and simp sets *}
section \<open>The type class and simp sets\<close>
text {* Packed types, with no padding, have the defining property that
text \<open>Packed types, with no padding, have the defining property that
access is invariant under substitution of the underlying heap and
access/update is the identity *}
access/update is the identity\<close>
class packed_type = mem_type +
assumes td_fafu_idem: "td_fafu_idem (typ_info_t TYPE('a::c_type))"
@ -630,9 +630,9 @@ next
case (Cons x xs) thus ?case by (simp add: min_def ac_simps drop_take)
qed
section {* Instances *}
section \<open>Instances\<close>
text {* Words (of multiple of 8 size) are packed *}
text \<open>Words (of multiple of 8 size) are packed\<close>
instantiation word :: (len8) packed_type
begin
@ -643,7 +643,7 @@ instance
done
end
text {* Pointers are always packed *}
text \<open>Pointers are always packed\<close>
instantiation ptr :: (c_type)packed_type
begin
@ -654,7 +654,7 @@ instance
done
end
text {* Arrays of packed types are in turn packed *}
text \<open>Arrays of packed types are in turn packed\<close>
class array_outer_packed = packed_type + array_outer_max_size
class array_inner_packed = array_outer_packed + array_inner_max_size
@ -670,9 +670,9 @@ instance array :: (array_outer_packed, array_max_count) packed_type
instance array :: (array_inner_packed, array_max_count) array_outer_packed ..
section {* Theorems about packed types *}
section \<open>Theorems about packed types\<close>
subsection {* td_fa_hi *}
subsection \<open>td_fa_hi\<close>
lemma heap_independence:
"\<lbrakk>length h = size_of TYPE('a :: packed_type); length h' = size_of TYPE('a) \<rbrakk>
@ -705,7 +705,7 @@ lemma packed_heap_update_collapse_hrs:
unfolding hrs_mem_update_def
by (simp add: split_def packed_heap_update_collapse)
subsection {* td_fafu_idem *}
subsection \<open>td_fafu_idem\<close>
lemma order_leE:
fixes x :: "'a :: order"
@ -771,10 +771,10 @@ next
show ?case
proof (cases n')
case 0 thus ?thesis using `Suc m' < n'` by simp
case 0 thus ?thesis using \<open>Suc m' < n'\<close> by simp
next
case (Suc n'')
hence "m' < n''" using `Suc m' < n'` by simp
hence "m' < n''" using \<open>Suc m' < n'\<close> by simp
thus ?thesis using Suc
by (simp add: Suc.hyps ac_simps)
qed

View File

@ -60,7 +60,7 @@ lemma tree_eq_fun_in_range_split:
apply fastforce
done
ML {*
ML \<open>
structure StaticFun = struct
@ -137,7 +137,7 @@ fun define_tree_and_thms_with_defs name names key_defs opt_values ord ctxt = let
end
*}
\<close>
(* testing

View File

@ -14,7 +14,7 @@ begin
install_C_file "attributes.c"
ML {*
ML \<open>
local
open ProgramAnalysis
in
@ -22,18 +22,18 @@ fun global_details vi = (srcname vi, length (get_vi_attrs vi))
val all_global_details = get_globals #> map global_details
end
*}
\<close>
ML {*
ML \<open>
val results = CalculateState.get_csenv @{theory} "attributes.c"
|> the
|> all_global_details
|> sort (prod_ord string_ord int_ord)
*}
\<close>
ML {*
ML \<open>
val _ = if results = [("u",1), ("v", 1)] then ()
else error "Test case failure"
*}
\<close>
end

View File

@ -12,20 +12,20 @@ theory codetests
imports "CParser.CTranslation"
begin
ML {* Context.>> (Context.map_theory (
ML \<open>Context.>> (Context.map_theory (
TermsTypes.prim_mk_defn "foo" @{term "33::nat"}))
*}
\<close>
thm foo_def
ML {*
ML \<open>
Context.>> (Context.map_theory (
RecursiveRecordPackage.define_record_type [
{record_name = "myrecord",
fields = [{fldname = "fld1", fldty = @{typ "nat"}},
{fldname = "myset", fldty = @{typ "nat \<Rightarrow> bool"}}]}
]))
*}
\<close>
thm myrecord_accupd_same

View File

@ -20,11 +20,11 @@ where
"fac 0 = 1"
| "fac (Suc n) = (Suc n) * fac n"
ML {*
ML \<open>
val ast = IsarInstall.get_Csyntax @{theory} "fncall.c"
*}
\<close>
external_file "fncall.c"
install_C_file "fncall.c"

View File

@ -12,9 +12,9 @@ theory fnptr
imports "CParser.CTranslation"
begin
ML {*
ML \<open>
IsarInstall.install_C_file ((((NONE,NONE),NONE),"fnptr.c"),NONE) @{theory}
*}
\<close>
external_file "fnptr.c"
install_C_file "fnptr.c"

View File

@ -21,10 +21,10 @@ begin
thm f_body_def
end
ML {*
ML \<open>
val SOME cse = CalculateState.get_csenv @{theory} "gcc_attribs.c"
val spec1 = Symtab.lookup (ProgramAnalysis.function_specs cse) "myexit"
val spec2 = Symtab.lookup (ProgramAnalysis.function_specs cse) "myexit2"
*}
\<close>
end

View File

@ -21,7 +21,7 @@ thm f_body_def
thm g_body_def
end
text {*
text \<open>
Following proof confirms that we can prove stuff about g without needing
to prove that f is side-effect free; which ain't true. The translation
doesn't incorrectly reckon that the initialisation of local variable i
@ -29,7 +29,7 @@ text {*
This proof still works, but there aren't side-effect-free guards inserted
at any point in the current translation so the point is a bit moot.
*}
\<close>
lemma (in initialised_decls_global_addresses) foo:
shows "\<Gamma> \<turnstile> \<lbrace> True \<rbrace> \<acute>ret__int :== PROC g() \<lbrace> \<acute>ret__int = 3 \<rbrace>"

View File

@ -20,7 +20,7 @@ declare [[use_anonymous_local_variables=true]]
context jiraver150
begin
ML {* @{const_name "unsigned_char'local0_'"} *}
ML \<open>@{const_name "unsigned_char'local0_'"}\<close>
thm f_body_def
thm f2_body_def

View File

@ -12,14 +12,14 @@ theory jiraver313
imports "CParser.CTranslation"
begin
ML {* Feedback.verbosity_level := 6 *}
ML \<open>Feedback.verbosity_level := 6\<close>
declare [[calculate_modifies_proofs = false ]]
external_file "jiraver313.c"
install_C_file memsafe "jiraver313.c"
ML {*
ML \<open>
local
open Absyn
val cpp_record =
@ -31,7 +31,7 @@ in
val Decl d = hd decls
val VarDecl vd = RegionExtras.node d
end
*}
\<close>
context jiraver313
begin

View File

@ -21,14 +21,14 @@ begin
thm useContext_body_def
term "r2_C_update"
ML {*
ML \<open>
val Const(nm, ty) = @{term "ucptr_'"}
val (d, r) = TermsTypes.dom_rng ty
val (pointedTo_tyname, []) = dest_Type (TermsTypes.dest_ptr_ty r)
val basename = List.last (String.fields (fn c => c = #".") pointedTo_tyname)
val _ = basename = NameGeneration.mkAnonStructName 1 orelse
raise Fail "anonymous struct has unexpected name"
*}
\<close>
end (* context *)

View File

@ -23,7 +23,7 @@ thm f0_body_def
thm f1_body_def
thm f2_body_def
ML {*
ML \<open>
fun count c th =
let
@ -37,7 +37,7 @@ val f = count @{const Div_0};
(f @{thm f1_body_def} = 1 andalso f @{thm f2_body_def} = 1 andalso f @{thm f0_body_def} = 1)
orelse
OS.Process.exit OS.Process.failure
*}
\<close>
end

View File

@ -15,7 +15,7 @@ begin
external_file "longlong.c"
install_C_file "longlong.c"
ML {* NameGeneration.return_var_name (Absyn.Signed Absyn.LongLong) *}
ML \<open>NameGeneration.return_var_name (Absyn.Signed Absyn.LongLong)\<close>
context longlong

View File

@ -23,7 +23,7 @@ install_C_file "many_local_vars.c"
context "many_local_vars_global_addresses" begin
lemma "\<forall>\<sigma>. \<Gamma> \<turnstile>\<^bsub>/UNIV\<^esub> {\<sigma>} Call test_'proc
{t. t may_not_modify_globals \<sigma>}"
apply (tactic {* HoarePackage.vcg_tac "_modifies" "false" [] @{context} 1 *})
apply (tactic \<open>HoarePackage.vcg_tac "_modifies" "false" [] @{context} 1\<close>)
done
end

View File

@ -12,9 +12,9 @@ theory modifies_speed
imports "CParser.CTranslation"
begin
text {* Speed test for modifies proofs. *}
text \<open>Speed test for modifies proofs.\<close>
ML {*
ML \<open>
fun filename_relative thy name =
Path.append (Resources.master_directory thy) (Path.explode name)
@ -44,11 +44,11 @@ fun write_speed_test_file n_globs n_funcs = let
List.app write_func (1 upto n_funcs);
TextIO.closeOut f
end
*}
\<close>
ML {*
ML \<open>
write_speed_test_file 10 40
*}
\<close>
declare [[sorry_modifies_proofs = false]]

View File

@ -20,19 +20,19 @@ context multi_deref_global_addresses begin
thm f_body_def (* only 1 C_Guard; see JIRA VER-85 *)
thm g_body_def (* 2 C_Guards, one per deref; see JIRA VER-152 *)
ML {*
ML \<open>
val th = @{thm g_body_def}
val t = Thm.concl_of th
fun incifGuard (@{const "C_Guard"}) i = i + 1
| incifGuard _ i = i
*}
\<close>
ML {*
ML \<open>
fold_aterms incifGuard t 0 = 2 orelse
OS.Process.exit OS.Process.failure
*}
\<close>
end

View File

@ -28,14 +28,14 @@ begin
thm ts20110511_1_body_def
thm ts20110511_2_body_def
ML {*
ML \<open>
val bthm = @{thm "ts20110511_1_body_def"}
val b_t = Thm.concl_of bthm
val cs = Term.add_consts b_t []
*}
\<close>
ML {* member (=) (map #1 cs) "CProof.strictc_errortype.C_Guard" orelse
OS.Process.exit OS.Process.failure *}
ML \<open>member (=) (map #1 cs) "CProof.strictc_errortype.C_Guard" orelse
OS.Process.exit OS.Process.failure\<close>
end

View File

@ -24,16 +24,16 @@ thm h_body_def
thm j_body_def
thm k_body_def
ML {*
ML \<open>
val kthm = @{thm k_body_def}
val k_t = Thm.concl_of kthm
val cs = Term.add_consts k_t []
*}
\<close>
ML {*
ML \<open>
member (=) (map #1 cs) "CProof.strictc_errortype.C_Guard" orelse
OS.Process.exit OS.Process.failure
*}
\<close>
end

View File

@ -67,15 +67,15 @@ lemma typ_uinfo_array_tag_n_m_eq:
by (simp add: typ_uinfo_t_def typ_info_array array_tag_def
uinfo_array_tag_n_m_eq)
text {* Alternative to h_t_valid for arrays. This allows reasoning
about arrays of variable width. *}
text \<open>Alternative to h_t_valid for arrays. This allows reasoning
about arrays of variable width.\<close>
definition
h_t_array_valid :: "heap_typ_desc \<Rightarrow> ('a :: c_type) ptr \<Rightarrow> nat \<Rightarrow> bool"
where
"h_t_array_valid htd ptr n = valid_footprint htd (ptr_val ptr) (uinfo_array_tag_n_m TYPE ('a) n n)"
text {* Assertion that pointer p is within an array that continues
for at least n more elements. *}
text \<open>Assertion that pointer p is within an array that continues
for at least n more elements.\<close>
definition
"array_assertion (p :: ('a :: c_type) ptr) n htd
= (\<exists>q i j. h_t_array_valid htd q j
@ -120,7 +120,7 @@ lemma array_ptr_valid_array_assertionI:
by (auto dest: array_ptr_valid_array_assertionD
simp: array_assertion_shrink_right)
text {* Derived from array_assertion, an appropriate assertion for performing
text \<open>Derived from array_assertion, an appropriate assertion for performing
a pointer addition, or for dereferencing a pointer addition (the strong case).
In either case, there must be an array connecting the two ends of the pointer
@ -128,7 +128,7 @@ transition, with the caveat that the last address can be just out of the array
if the pointer is not dereferenced, thus the strong/weak distinction.
If the pointer doesn't actually move, nothing is learned.
*}
\<close>
definition
ptr_add_assertion :: "('a :: c_type) ptr \<Rightarrow> int \<Rightarrow> bool \<Rightarrow> heap_typ_desc \<Rightarrow> bool"
where
@ -155,8 +155,8 @@ lemma ptr_add_assertion_uint[simp]:
by (simp add: ptr_add_assertion_positive uint_0_iff unat_def
split: bool.split)
text {* Ignore char and void pointers. The C standard specifies that arithmetic on
char and void pointers doesn't create any special checks. *}
text \<open>Ignore char and void pointers. The C standard specifies that arithmetic on
char and void pointers doesn't create any special checks.\<close>
definition
ptr_add_assertion' :: "('a :: c_type) ptr \<Rightarrow> int \<Rightarrow> bool \<Rightarrow> heap_typ_desc \<Rightarrow> bool"
@ -176,7 +176,7 @@ lemma typ_name_void:
lemmas ptr_add_assertion' = ptr_add_assertion'_def td_diff_from_typ_name typ_name_void
text {* Mechanism for retyping a range of memory to a non-constant array size. *}
text \<open>Mechanism for retyping a range of memory to a non-constant array size.\<close>
definition
ptr_arr_retyps :: "nat \<Rightarrow> ('a :: c_type) ptr \<Rightarrow> heap_typ_desc \<Rightarrow> heap_typ_desc"

View File

@ -56,7 +56,7 @@ next
then obtain b B where
S: "S = insert b B \<and> b \<notin> B \<and> card B = n \<and> (n = 0 \<longrightarrow> B = {})"
by (auto simp: card_Suc_eq)
with `finite S` Suc.hyps [of B]
with \<open>finite S\<close> Suc.hyps [of B]
obtain f where IH: "(\<forall>m<n. f m \<in> B) \<and> (\<forall>x. x \<in> B \<longrightarrow> (\<exists>!m. m < n \<and> f m = x))" by auto
define f' where "f' \<equiv> \<lambda>m. if m = card B then b else f m"
from Suc.prems S IH
@ -87,7 +87,7 @@ lemma forall_finite_index:
by (metis (mono_tags, hide_lams) finite_index_works)
section {* Finite Cartesian Products *}
section \<open>Finite Cartesian Products\<close>
typedef ('a,'n::finite) array ("_[_]" [30,0] 31) = "UNIV :: ('n => 'a) set"
by simp

View File

@ -80,7 +80,7 @@ lemma update_ti_pair_t_dtpair [simp]:
"update_ti_pair_t (DTPair t f) = update_ti_t t"
by (rule ext, simp add: update_ti_pair_t_def update_ti_t_def)
text {* --------------------------------------------------------------- *}
text \<open>---------------------------------------------------------------\<close>
lemma field_desc_empty [simp]:
"field_desc (TypDesc (TypAggregate []) nm) = \<lparr> field_access = \<lambda>x bs. [],

View File

@ -127,12 +127,12 @@ end
subsection "Raw heap"
text {* A raw map from addresses to bytes *}
text \<open>A raw map from addresses to bytes\<close>
type_synonym heap_mem = "addr \<Rightarrow> byte"
text {* For heap h, pointer p and nat n, (heap_list h n p) returns the list
of bytes in the heap taken from addresses {p..+n} *}
text \<open>For heap h, pointer p and nat n, (heap_list h n p) returns the list
of bytes in the heap taken from addresses {p..+n}\<close>
primrec
heap_list :: "heap_mem \<Rightarrow> nat \<Rightarrow> addr \<Rightarrow> byte list"
@ -143,9 +143,9 @@ where
section "Intervals"
text {*
text \<open>
For word a and nat b, {a..+b} is the set of words x,
with unat (x - a) < b. *}
with unat (x - a) < b.\<close>
definition
intvl :: "'a::len word \<times> nat \<Rightarrow> 'a::len word set" where

View File

@ -25,11 +25,11 @@ type_synonym qualified_field_name = "field_name list"
type_synonym typ_name = string
text {* A typ_desc wraps a typ_struct with a typ name.
text \<open>A typ_desc wraps a typ_struct with a typ name.
A typ_struct is either a Scalar, with size, alignment and either a
field accessor/updator pair (for typ_info) or a 'normalisor'
(for typ_uinfo), or an Aggregate, with a list of typ_desc,
field name pairs.*}
field name pairs.\<close>
datatype
'a typ_desc = TypDesc "'a typ_struct" typ_name
@ -94,8 +94,8 @@ definition fu_commutes :: "('b \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow>
"fu_commutes f g \<equiv> \<forall>v bs bs'. f bs (g bs' v) = g bs' (f bs v)"
text {* size_td returns the sum of the sizes of all Scalar fields
comprising a typ_desc i.e. the overall size of the type *}
text \<open>size_td returns the sum of the sizes of all Scalar fields
comprising a typ_desc i.e. the overall size of the type\<close>
(* Could express this and many other typ_desc primrecs as tree fold/map
combos, but the intuition this way is clearer for anything non-trivial *)
@ -116,8 +116,8 @@ where
| tz5: "size_td_pair (DTPair t n) = size_td t"
text {* access_ti overlays the byte-wise representation of an object
on a given byte list, given the typ_info (i.e. the layout) *}
text \<open>access_ti overlays the byte-wise representation of an object
on a given byte list, given the typ_info (i.e. the layout)\<close>
primrec
access_ti :: "'a typ_info \<Rightarrow> ('a \<Rightarrow> byte list \<Rightarrow> byte list)" and
@ -145,8 +145,8 @@ text \<open>@{text access_ti\<^sub>0} overlays the representation of an object o
definition access_ti\<^sub>0 :: "'a typ_info \<Rightarrow> ('a \<Rightarrow> byte list)" where
"access_ti\<^sub>0 t \<equiv> \<lambda>v. access_ti t v (replicate (size_td t) 0)"
text {* update_ti updates an object, given a list of bytes (the
representation of the new value), and the typ_info *}
text \<open>update_ti updates an object, given a list of bytes (the
representation of the new value), and the typ_info\<close>
primrec
update_ti :: "'a typ_info \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" and
@ -167,8 +167,8 @@ where
| fu5: "update_ti_pair (DTPair t nm) = update_ti t"
text {* update_ti_t updates an object only if the length of the
supplied representation equals the object size *}
text \<open>update_ti_t updates an object only if the length of the
supplied representation equals the object size\<close>
definition update_ti_t :: "'a typ_info \<Rightarrow> (byte list \<Rightarrow> 'a \<Rightarrow> 'a)" where
"update_ti_t t \<equiv> \<lambda>bs. if length bs = size_td t then
@ -187,8 +187,8 @@ definition update_ti_pair_t :: "'a typ_info_pair \<Rightarrow> (byte list \<Righ
update_ti_pair t bs else id"
text {* field_desc generates the access/update pair for a field,
given the field's type_desc *}
text \<open>field_desc generates the access/update pair for a field,
given the field's type_desc\<close>
definition field_desc :: "'a typ_info \<Rightarrow> 'a field_desc" where
"field_desc t \<equiv> \<lparr> field_access = access_ti t,

View File

@ -46,7 +46,7 @@ lemma aggregate_x_struct_ex_tag [simp]:
lemma
"upd_local (x_example_update \<circ> (\<lambda>x _. x))"
apply(auto simp: upd_local_def )
apply(tactic {* Record.split_tac @{context} 1 *} )
apply(tactic \<open>Record.split_tac @{context} 1\<close> )
apply simp
done

View File

@ -8,31 +8,31 @@
* @TAG(NICTA_BSD)
*)
chapter {* More properties of maps plus map disjuction. *}
chapter \<open>More properties of maps plus map disjuction.\<close>
theory MapExtra
imports Main
begin
text {*
text \<open>
BEWARE: we are not interested in using the @{term "dom x \<inter> dom y = {}"}
rules from Map for our separation logic proofs. As such, we overwrite the
Map rules where that form of disjointness is in the assumption conflicts
with a name we want to use with @{text "\<bottom>"}. *}
with a name we want to use with @{text "\<bottom>"}.\<close>
text {*
text \<open>
A note on naming:
Anything not involving heap disjuction can potentially be incorporated
directly into Map.thy, thus uses @{text "m"}.
Anything involving heap disjunction is not really mergeable with Map, is
destined for use in separation logic, and hence uses @{text "h"}
*}
\<close>
text {*--------------------------------------- *}
text {* Things that should go into Option Type *}
text {*--------------------------------------- *}
text \<open>---------------------------------------\<close>
text \<open>Things that should go into Option Type\<close>
text \<open>---------------------------------------\<close>
text {* Misc option lemmas *}
text \<open>Misc option lemmas\<close>
lemma None_not_eq: "(None \<noteq> x) = (\<exists>y. x = Some y)" by (cases x) auto
@ -40,24 +40,24 @@ lemma None_com: "(None = x) = (x = None)" by fast
lemma Some_com: "(Some y = x) = (x = Some y)" by fast
text {*--------------------------------------- *}
text {* Things that should go into Map.thy *}
text {*--------------------------------------- *}
text \<open>---------------------------------------\<close>
text \<open>Things that should go into Map.thy\<close>
text \<open>---------------------------------------\<close>
text {* Map intersection: set of all keys for which the maps agree. *}
text \<open>Map intersection: set of all keys for which the maps agree.\<close>
definition
map_inter :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> 'a set" (infixl "\<inter>\<^sub>m" 70) where
"m\<^sub>1 \<inter>\<^sub>m m\<^sub>2 \<equiv> {x \<in> dom m\<^sub>1. m\<^sub>1 x = m\<^sub>2 x}"
text {* Map restriction via domain subtraction *}
text \<open>Map restriction via domain subtraction\<close>
definition
sub_restrict_map :: "('a \<rightharpoonup> 'b) => 'a set => ('a \<rightharpoonup> 'b)" (infixl "`-" 110)
where
"m `- S \<equiv> (\<lambda>x. if x \<in> S then None else m x)"
subsection {* Properties of maps not related to restriction *}
subsection \<open>Properties of maps not related to restriction\<close>
lemma empty_forall_equiv: "(m = Map.empty) = (\<forall>x. m x = None)"
by (rule fun_eq_iff)
@ -116,7 +116,7 @@ lemma map_le_same_dom_eq:
"\<lbrakk> m\<^sub>0 \<subseteq>\<^sub>m m\<^sub>1 ; dom m\<^sub>0 = dom m\<^sub>1 \<rbrakk> \<Longrightarrow> m\<^sub>0 = m\<^sub>1"
by (simp add: map_le_antisym map_le_def)
subsection {* Properties of map restriction *}
subsection \<open>Properties of map restriction\<close>
lemma restrict_map_cancel:
"(m |` S = m |` T) = (dom m \<inter> S = dom m \<inter> T)"
@ -218,13 +218,13 @@ lemma prod_restrict_map_add:
by (auto simp: map_add_def restrict_map_def split: option.splits)
text {*--------------------------------------- *}
text {* Things that should NOT go into Map.thy *}
text {*--------------------------------------- *}
text \<open>---------------------------------------\<close>
text \<open>Things that should NOT go into Map.thy\<close>
text \<open>---------------------------------------\<close>
section {* Definitions *}
section \<open>Definitions\<close>
text {* Map disjuction *}
text \<open>Map disjuction\<close>
definition
map_disj :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> bool" (infix "\<bottom>" 51) where
@ -232,7 +232,7 @@ definition
declare None_not_eq [simp]
text {* Heap monotonicity and the frame property *}
text \<open>Heap monotonicity and the frame property\<close>
definition
heap_mono :: "(('a \<rightharpoonup> 'b) \<Rightarrow> 'c option) \<Rightarrow> bool" where
@ -257,7 +257,7 @@ lemma heap_frameE:
unfolding heap_frame_def by fastforce
section {* Properties of @{term "sub_restrict_map"} *}
section \<open>Properties of @{term "sub_restrict_map"}\<close>
lemma restrict_map_sub_disj: "h |` S \<bottom> h `- S"
by (fastforce simp: sub_restrict_map_def restrict_map_def map_disj_def
@ -268,7 +268,7 @@ lemma restrict_map_sub_add: "h |` S ++ h `- S = h"
split: option.splits if_split)
section {* Properties of map disjunction *}
section \<open>Properties of map disjunction\<close>
lemma map_disj_empty_right [simp]:
"h \<bottom> Map.empty"
@ -291,7 +291,7 @@ lemma map_disjI:
by (simp add: map_disj_def)
subsection {* Map associativity-commutativity based on map disjuction *}
subsection \<open>Map associativity-commutativity based on map disjuction\<close>
lemma map_add_com:
"h\<^sub>0 \<bottom> h\<^sub>1 \<Longrightarrow> h\<^sub>0 ++ h\<^sub>1 = h\<^sub>1 ++ h\<^sub>0"
@ -309,27 +309,27 @@ lemma map_add_disj':
"(h\<^sub>1 ++ h\<^sub>2) \<bottom> h\<^sub>0 = (h\<^sub>1 \<bottom> h\<^sub>0 \<and> h\<^sub>2 \<bottom> h\<^sub>0)"
by (simp add: map_disj_def, fast)
text {*
text \<open>
We redefine @{term "map_add"} associativity to bind to the right, which
seems to be the more common case.
Note that when a theory includes Map again, @{text "map_add_assoc"} will
return to the simpset and will cause infinite loops if its symmetric
counterpart is added (e.g. via @{text "map_ac_simps"})
*}
\<close>
declare map_add_assoc [simp del]
text {*
text \<open>
Since the associativity-commutativity of @{term "map_add"} relies on
map disjunction, we include some basic rules into the ac set.
*}
\<close>
lemmas map_ac_simps =
map_add_assoc[symmetric] map_add_com map_disj_com
map_add_left_commute map_add_disj map_add_disj'
subsection {* Basic properties *}
subsection \<open>Basic properties\<close>
lemma map_disj_None_right:
"\<lbrakk> h\<^sub>0 \<bottom> h\<^sub>1 ; x \<in> dom h\<^sub>0 \<rbrakk> \<Longrightarrow> h\<^sub>1 x = None"
@ -352,7 +352,7 @@ lemma map_disj_common:
by (frule (1) map_disj_None_left', simp)
subsection {* Map disjunction and addition *}
subsection \<open>Map disjunction and addition\<close>
lemma map_add_eval_left:
"\<lbrakk> x \<in> dom h ; h \<bottom> h' \<rbrakk> \<Longrightarrow> (h ++ h') x = h x"
@ -444,7 +444,7 @@ lemma map_add_lr_disj:
(auto split: option.splits)
subsection {* Map disjunction and updates *}
subsection \<open>Map disjunction and updates\<close>
lemma map_disj_update_left [simp]:
"p \<in> dom h\<^sub>1 \<Longrightarrow> h\<^sub>0 \<bottom> h\<^sub>1(p \<mapsto> v) = h\<^sub>0 \<bottom> h\<^sub>1"
@ -470,7 +470,7 @@ lemma map_add3_update:
by (auto simp: map_add_update_left[symmetric] map_ac_simps)
subsection {* Map disjunction and @{term "map_le"} *}
subsection \<open>Map disjunction and @{term "map_le"}\<close>
lemma map_le_override [simp]:
"\<lbrakk> h \<bottom> h' \<rbrakk> \<Longrightarrow> h \<subseteq>\<^sub>m h ++ h'"
@ -527,7 +527,7 @@ lemma map_le_conv2:
by (case_tac "h\<^sub>0'=h\<^sub>0", insert map_le_conv, auto intro: exI[where x=Map.empty])
subsection {* Map disjunction and restriction *}
subsection \<open>Map disjunction and restriction\<close>
lemma map_disj_comp [simp]:
"h\<^sub>0 \<bottom> h\<^sub>1 |` (UNIV - dom h\<^sub>0)"

View File

@ -65,7 +65,7 @@ definition
where
"sep_cut x y \<equiv> sep_cut' x (unat y)"
text {* ---- *}
text \<open>----\<close>
(* FIXME MOVE *)
lemma heap_list_h_eq:

View File

@ -168,7 +168,7 @@ where
"x \<in> intra_deps C \<Longrightarrow> x \<in> proc_deps C \<Gamma>"
| "\<lbrakk> x \<in> proc_deps C \<Gamma>; \<Gamma> x = Some D; y \<in> intra_deps D \<rbrakk> \<Longrightarrow> y \<in> proc_deps C \<Gamma>"
text {* ---- *}
text \<open>----\<close>
lemma point_eq_mod_refl [simp]:
"point_eq_mod f f X"

View File

@ -21,11 +21,11 @@ definition inv_footprint :: "'a::c_type ptr \<Rightarrow> heap_assert" where
"inv_footprint p \<equiv> \<lambda>s. dom s = {(x,y). x \<in> {ptr_val p..+size_of TYPE('a)}} -
s_footprint p"
text {*
text \<open>
Like in Separation.thy, these arrows are defined using bsub and esub but
have an \emph{input} syntax abbreviation with just sub.
See original comment there for justification.
*}
\<close>
definition
sep_map_inv :: "'a::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> 'a \<Rightarrow> heap_assert"
@ -68,7 +68,7 @@ definition
where
"g \<turnstile>\<^sub>s\<^sup>i p \<equiv> g \<turnstile>\<^sub>s p \<and>\<^sup>* inv_footprint p"
text {* ---- *}
text \<open>----\<close>
lemma sep_map'_g:
"(p \<hookrightarrow>\<^sup>i\<^sub>g v) s \<Longrightarrow> g p"

View File

@ -65,7 +65,7 @@ lemma sep_point_otherD:
"sep_points P s \<Longrightarrow> True"
by (rule TrueI)
ML {*
ML \<open>
val sep_point_ds = [@{thm sep_point_mapD}, @{thm sep_point_map_excD}]
@ -85,7 +85,7 @@ fun sep_point_tacs ds ctxt = [
]
fun sep_point_tac ds ctxt = EVERY (sep_point_tacs ds ctxt)
*}
\<close>
method_setup sep_point_tac =
"Attrib.thms >> (fn thms => Method.SIMPLE_METHOD o sep_point_tac thms)"
@ -97,10 +97,10 @@ lemma "(P \<and>\<^sup>* p \<mapsto>\<^sub>g v \<and>\<^sup>* Q) s \<Longrightar
section "sep_exists_tac"
ML{*
ML\<open>
fun sep_exists_tac ctxt = full_simp_tac
(put_simpset HOL_ss ctxt addsimps [@{thm "sep_conj_exists1"}, @{thm "sep_conj_exists2"}])
*}
\<close>
method_setup sep_exists_tac =
"Scan.succeed (Method.SIMPLE_METHOD' o sep_exists_tac)"
@ -153,7 +153,7 @@ lemma sep_mark2_left2:
"(P \<and>\<^sup>* sep_mark2 Q) = (sep_mark2 Q \<and>\<^sup>* P)"
by (rule sep_conj_com)
ML {*
ML \<open>
(* Replace all occurrences of an underscore that does not have an alphanumeric
on either side of it *)
@ -185,9 +185,9 @@ rusc "(_ +\<^sub>p _) \<mapsto> _";
rusc "_ \<mapsto>\<^sub>_ _";
rusc "_";
rusc "_ O_o _"
*}
\<close>
ML{*
ML\<open>
fun sep_select_tacs s ctxt =
let val (str, vars) = rusc s
@ -214,10 +214,10 @@ fun sep_select_tac s ctxt = SELECT_GOAL (EVERY (sep_select_tacs s ctxt))
fun lift_parser p (ctxt,ts) =
let val (r, ts') = p ts
in (r, (ctxt,ts')) end
*}
\<close>
method_setup sep_select_tac =
{* lift_parser Args.name >> (fn s => Method.SIMPLE_METHOD' o sep_select_tac s) *}
\<open>lift_parser Args.name >> (fn s => Method.SIMPLE_METHOD' o sep_select_tac s)\<close>
"takes a target conjunct in the goal and reorders it to the front"
lemma
@ -259,7 +259,7 @@ lemma ptr_retyp_sep_cut'_wp_hrs:
by (case_tac s)
(simp add: hrs_htd_update_def, erule (1) ptr_retyp_sep_cut'_wp)
ML {*
ML \<open>
fun destruct bs (Bound n) = Free (List.nth (bs,n))
| destruct bs (Abs (s,ty,t)) = destruct ((s,ty)::bs) t
@ -330,7 +330,7 @@ end
fun sep_wp_tac ctxt = (REPEAT (sep_wp_step_tac ctxt false)) THEN sep_wp_step_tac ctxt true
*}
\<close>
method_setup sep_wp_tac =
"Scan.succeed (Method.SIMPLE_METHOD o sep_wp_tac)"

View File

@ -46,13 +46,13 @@ definition
where
"singleton p v h d \<equiv> lift_state (heap_update p v h,d) |` s_footprint p"
text {*
text \<open>
Like in Separation.thy, these arrows are defined using bsub and esub but
have an \emph{input} syntax abbreviation with just sub.
Why? Because if sub is the only way, people write things like
@{text "p \<mapsto>\<^sup>i\<^sub>(f x y) v"} instead of @{text "p \<mapsto>\<^sup>i\<^bsub>(f x y)\<^esub> v"}. We preserve
the sub syntax though, because esub and bsub are a pain to type.
*}
\<close>
definition
sep_map :: "'a::c_type ptr \<Rightarrow> 'a ptr_guard \<Rightarrow> 'a \<Rightarrow> heap_assert"
@ -91,7 +91,7 @@ syntax
"_sep_assert" :: "bool \<Rightarrow> heap_state \<Rightarrow> bool" ("'(_')\<^bsup>sep\<^esup>" [0] 100)
text {* ---- *}
text \<open>----\<close>
lemma sep_empD:
"\<box> s \<Longrightarrow> s = Map.empty"

View File

@ -299,7 +299,7 @@ definition
where
"s \<bottom>\<^sub>\<tau> t \<equiv> typ_uinfo_t s \<bottom>\<^sub>t typ_uinfo_t t"
text {* ---- *}
text \<open>----\<close>
lemma wf_heap_val_SIndexVal_STyp_simp [simp]:
"wf_heap_val s \<Longrightarrow> s (x,SIndexVal) \<noteq> Some (STyp t)"