From 044a97ed1a6128de1a9d7c695ee77176e335464f Mon Sep 17 00:00:00 2001 From: Rafal Kolanski Date: Thu, 18 May 2023 12:36:09 +1000 Subject: [PATCH] aarch64 refine: first run through Schedule_R Signed-off-by: Rafal Kolanski --- proof/refine/AARCH64/Schedule_R.thy | 2460 +++++++++++++++++++++++++++ 1 file changed, 2460 insertions(+) create mode 100644 proof/refine/AARCH64/Schedule_R.thy diff --git a/proof/refine/AARCH64/Schedule_R.thy b/proof/refine/AARCH64/Schedule_R.thy new file mode 100644 index 000000000..911049366 --- /dev/null +++ b/proof/refine/AARCH64/Schedule_R.thy @@ -0,0 +1,2460 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Schedule_R +imports VSpace_R +begin + +context begin interpretation Arch . (*FIXME: arch_split*) + +declare static_imp_wp[wp_split del] + +(* Levity: added (20090713 10:04:12) *) +declare sts_rel_idle [simp] + +lemma invs_no_cicd'_queues: + "invs_no_cicd' s \ valid_queues s" + unfolding invs_no_cicd'_def + by simp + +lemma corres_if2: + "\ G = G'; G \ corres r P P' a c; \ G' \ corres r Q Q' b d \ + \ corres r (if G then P else Q) (if G' then P' else Q') (if G then a else b) (if G' then c else d)" + by simp + +lemma findM_awesome': + assumes x: "\x xs. suffix (x # xs) xs' \ + corres (\a b. if b then (\a'. a = Some a' \ r a' (Some x)) else a = None) + P (P' (x # xs)) + ((f >>= (\x. return (Some x))) \ (return None)) (g x)" + assumes y: "corres r P (P' []) f (return None)" + assumes z: "\x xs. suffix (x # xs) xs' \ + \P' (x # xs)\ g x \\rv s. \ rv \ P' xs s\" + assumes p: "suffix xs xs'" + shows "corres r P (P' xs) f (findM g xs)" +proof - + have P: "f = do x \ (do x \ f; return (Some x) od) \ return None; if x \ None then return (the x) else f od" + apply (rule ext) + apply (auto simp add: bind_def alternative_def return_def split_def prod_eq_iff) + done + have Q: "\P\ (do x \ f; return (Some x) od) \ return None \\rv. 
if rv \ None then \ else P\" + by (wp alternative_wp | simp)+ + show ?thesis using p + apply (induct xs) + apply (simp add: y del: dc_simp) + apply (simp only: findM.simps) + apply (subst P) + apply (rule corres_guard_imp) + apply (rule corres_split[OF x]) + apply assumption + apply (rule corres_if2) + apply (case_tac ra, clarsimp+)[1] + apply (rule corres_trivial, clarsimp) + apply (case_tac ra, simp_all)[1] + apply (erule(1) meta_mp [OF _ suffix_ConsD]) + apply (rule Q) + apply (rule hoare_post_imp [OF _ z]) + apply simp+ + done +qed + +lemmas findM_awesome = findM_awesome' [OF _ _ _ suffix_order.refl] + +(* Levity: added (20090721 10:56:29) *) +declare objBitsT_koTypeOf [simp] + +(* FIXME AARCH64: ARM_HYP has a number of rules for idempotence of VM lookup functions over + heap updates at a VCPU ptr, culminating in: + valid_vs_lookup (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) = valid_vs_lookup s + most of these have not been adapted from ARM_HYP since the VM lookup functions are very different +*) + +lemma vs_lookup_pages_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ vs_lookup_pages (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + = vs_lookup_pages s" + sorry (* FIXME AARCH64 + by (clarsimp simp: vs_lookup_pages_def vs_lookup_pages1_vcpu_update) *) + +lemma valid_vs_lookup_vcpu_update: + "typ_at (AArch AVCPU) vcpuPtr s \ valid_vs_lookup (s\kheap := kheap s(vcpuPtr \ ArchObj (VCPU vcpu))\) + = valid_vs_lookup s" + apply (clarsimp simp: valid_vs_lookup_def caps_of_state_VCPU_update) + apply (rule all_cong1) + apply (rule all_cong1) + sorry (* FIXME AARCH64 + apply (rule imp_cong) + apply (simp add: vs_lookup_pages_vcpu_update) + apply simp + done *) + +lemma set_vpcu_valid_vs_lookup[wp]: + "set_vcpu vcpuPtr vcpu \\s. P (valid_vs_lookup s)\" + apply (rule hoare_pre, rule set_vcpu_wp) + apply (simp add: valid_vs_lookup_vcpu_update) + done + +(* FIXME AARCH64 move to ArchVSpace_AI to match ARM_HYP *) +crunches do_machine_op + for valid_vs_lookup2[wp]: "\s. P (valid_vs_lookup s)" + (ignore: get_object set_object) + +crunches vcpu_switch + for valid_vs_lookup[wp]: "\s. P (valid_vs_lookup s)" + (simp: crunch_simps wp: crunch_wps) + +(* FIXME AARCH64 won't crunch *) +lemma vcpu_switch_valid_asid_table[wp]: + "vcpu_switch v \valid_asid_table\" + sorry + +lemma vcpu_switch_valid_global_arch_objs[wp]: + "vcpu_switch v \valid_global_arch_objs\" + sorry (* FIXME AARCH64: crunchable? *) + +crunches set_vm_root + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + (simp: crunch_simps) + +(* FIXME AARCH64: re-examine sym_refs assumption *) +lemma arch_switchToThread_corres: + "corres dc (valid_arch_state and valid_objs and pspace_aligned and pspace_distinct + and valid_vspace_objs and st_tcb_at runnable t and (\s. 
sym_refs (state_hyp_refs_of s))) + (no_0_obj') + (arch_switch_to_thread t) (Arch.switchToThread t)" + apply (simp add: arch_switch_to_thread_def AARCH64_H.switchToThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getObject_TCB_corres]) + apply (rule corres_split[OF vcpuSwitch_corres]) + apply (simp add: tcb_relation_def arch_tcb_relation_def) + apply (rule setVMRoot_corres[OF refl]) + apply (wpsimp simp: tcb_at_typ wp: hoare_vcg_imp_lift' getObject_tcb_wp)+ + subgoal + apply (auto simp: obj_at_def pred_tcb_at_def) + (* FIXME AARCH64 this should be provable via symrefs *) + sorry + apply normalise_obj_at' + (* FIXME AARCH64 this needs extra info (cross over from abstract?), as we don't have symrefs + on the haskell side + + for completeness, here is the ARM_HYP version of this proof: + + apply (simp add: arch_switch_to_thread_def ARM_HYP_H.switchToThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF get_tcb_corres]) + apply (rule corres_split[OF vcpuSwitch_corres']) + apply (clarsimp simp: tcb_relation_def arch_tcb_relation_def) + apply (rule corres_split[OF setVMRoot_corres]) + apply (rule corres_machine_op[OF corres_rel_imp]) + apply (rule corres_underlying_trivial) + apply wpsimp + apply (clarsimp simp: ARM_HYP.clearExMonitor_def)+ + apply wpsimp + apply wpsimp + apply (wpsimp wp: tcb_at_typ_at simp:) + apply (wpsimp simp: ARM_HYP.clearExMonitor_def wp: tcb_at_typ_at)+ + apply (wp getObject_tcb_hyp_sym_refs) + apply (clarsimp simp: st_tcb_at_tcb_at) + apply (clarsimp simp: get_tcb_ko_at[unfolded obj_at_def]) + apply (rule conjI) + apply (erule (1) pspace_valid_objsE) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_arch_tcb_def) + apply (rule conjI) + apply (clarsimp simp: valid_arch_state_def obj_at_def is_vcpu_def) + apply (clarsimp simp: ) + apply (rename_tac tcb vcpuPtr) + apply (drule_tac y=vcpuPtr and x=t and tp=HypTCBRef in sym_refsE + ; clarsimp simp: in_state_hyp_refs_of hyp_refs_of_rev obj_at_def hyp_live_def arch_live_def) + apply fastforce + + + *) + sorry + +lemma schedule_choose_new_thread_sched_act_rct[wp]: + "\\\ schedule_choose_new_thread \\rs s. scheduler_action s = resume_cur_thread\" + unfolding schedule_choose_new_thread_def + by wp + +lemma tcbSchedAppend_corres: + notes trans_state_update'[symmetric, simp del] + shows + "corres dc (is_etcb_at t and tcb_at t and pspace_aligned and pspace_distinct) + (Invariants_H.valid_queues and valid_queues') + (tcb_sched_action (tcb_sched_append) t) (tcbSchedAppend t)" + apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q]) + apply (fastforce simp: tcb_at_cross state_relation_def) + apply (simp only: tcbSchedAppend_def tcb_sched_action_def) + apply (rule corres_symb_exec_r [OF _ _ threadGet_inv, where Q'="\rv. tcb_at' t and Invariants_H.valid_queues and valid_queues' and obj_at' (\obj. 
tcbQueued obj = rv) t"]) + defer + apply (wp threadGet_obj_at', simp, simp) + apply (rule no_fail_pre, wp, simp) + apply (case_tac queued) + apply (simp add: unless_def when_def) + apply (rule corres_no_failI) + apply wp+ + apply (clarsimp simp: in_monad ethread_get_def gets_the_def bind_assoc + assert_opt_def exec_gets is_etcb_at_def get_etcb_def get_tcb_queue_def + set_tcb_queue_def simpler_modify_def) + + apply (subgoal_tac "tcb_sched_append t (ready_queues a (tcb_domain y) (tcb_priority y)) + = (ready_queues a (tcb_domain y) (tcb_priority y))") + apply (simp add: state_relation_def ready_queues_relation_def) + apply (clarsimp simp: tcb_sched_append_def state_relation_def + valid_queues'_def ready_queues_relation_def + ekheap_relation_def etcb_relation_def + obj_at'_def inQ_def project_inject) + apply (drule_tac x=t in bspec,clarsimp) + apply clarsimp + apply (clarsimp simp: unless_def when_def cong: if_cong) + apply (rule stronger_corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply (rule ethreadget_corres) + apply (simp add: etcb_relation_def) + apply (rule corres_split[where r'="(=)"]) + apply simp + apply (rule getQueue_corres) + apply (rule corres_split_noop_rhs2) + apply (simp add: tcb_sched_append_def) + apply (intro conjI impI) + apply (rule corres_guard_imp) + apply (rule setQueue_corres) + prefer 3 + apply (rule_tac P=\ and Q="K (t \ set queuea)" in corres_assume_pre) + apply simp + apply simp + apply simp + apply (rule corres_split_noop_rhs2) + apply (rule addToBitmap_if_null_noop_corres) + apply (rule threadSet_corres_noop, simp_all add: tcb_relation_def exst_same_def)[1] + apply wp+ + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def + project_inject) + done + + +crunches tcbSchedEnqueue, tcbSchedAppend, tcbSchedDequeue + for valid_pspace'[wp]: valid_pspace' + and valid_arch_state'[wp]: valid_arch_state' + (simp: unless_def) + +crunches tcbSchedAppend, tcbSchedDequeue + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t" + (wp: threadSet_pred_tcb_no_state simp: unless_def tcb_to_itcb'_def) + +lemma removeFromBitmap_valid_queues_no_bitmap_except[wp]: + "\ valid_queues_no_bitmap_except t \ + removeFromBitmap d p + \\_. valid_queues_no_bitmap_except t \" + unfolding bitmapQ_defs valid_queues_no_bitmap_except_def + by (wp| clarsimp simp: bitmap_fun_defs)+ + +lemma removeFromBitmap_bitmapQ: + "\ \s. True \ removeFromBitmap d p \\_ s. \ bitmapQ d p s \" + unfolding bitmapQ_defs bitmap_fun_defs + by (wp| clarsimp simp: bitmap_fun_defs)+ + +lemma removeFromBitmap_valid_bitmapQ[wp]: +" \ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. ksReadyQueues s (d,p) = []) \ + removeFromBitmap d p + \\_. valid_bitmapQ \" +proof - + have "\ valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. ksReadyQueues s (d,p) = []) \ + removeFromBitmap d p + \\_. valid_bitmapQ_except d p and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. 
\ bitmapQ d p s \ ksReadyQueues s (d,p) = []) \" + by (rule hoare_pre) + (wp removeFromBitmap_valid_queues_no_bitmap_except removeFromBitmap_valid_bitmapQ_except + removeFromBitmap_bitmapQ, simp) + thus ?thesis + by - (erule hoare_strengthen_post; fastforce elim: valid_bitmap_valid_bitmapQ_exceptE) +qed + +(* this should be the actual weakest precondition to establish valid_queues + under tagging a thread as not queued *) +lemma threadSet_valid_queues_dequeue_wp: + "\ valid_queues_no_bitmap_except t and + valid_bitmapQ and bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + (\s. \d p. t \ set (ksReadyQueues s (d,p))) \ + threadSet (tcbQueued_update (\_. False)) t + \\rv. valid_queues \" + unfolding threadSet_def + apply (rule hoare_seq_ext[OF _ getObject_tcb_sp]) + apply (rule hoare_pre) + apply (simp add: valid_queues_def valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def) + apply (wp setObject_queues_unchanged_tcb hoare_Ball_helper hoare_vcg_all_lift + setObject_tcb_strongest) + apply (clarsimp simp: valid_queues_no_bitmap_except_def obj_at'_def valid_queues_no_bitmap_def) + done + +(* FIXME move *) +lemmas obj_at'_conjI = obj_at_conj' + +lemma setQueue_valid_queues_no_bitmap_except_dequeue_wp: + "\d p ts t. + \ \s. valid_queues_no_bitmap_except t s \ + (\t' \ set ts. obj_at' (inQ d p and runnable' \ tcbState) t' s) \ + t \ set ts \ distinct ts \ p \ maxPriority \ d \ maxDomain \ + setQueue d p ts + \\rv. valid_queues_no_bitmap_except t \" + unfolding setQueue_def valid_queues_no_bitmap_except_def null_def + by wp force + +definition (* if t is in a queue, it should be tagged with right priority and domain *) + "correct_queue t s \ \d p. t \ set(ksReadyQueues s (d, p)) \ + (obj_at' (\tcb. tcbQueued tcb \ tcbDomain tcb = d \ tcbPriority tcb = p) t s)" + +lemma valid_queues_no_bitmap_correct_queueI[intro]: + "valid_queues_no_bitmap s \ correct_queue t s" + unfolding correct_queue_def valid_queues_no_bitmap_def + by (fastforce simp: obj_at'_def inQ_def) + + +lemma tcbSchedDequeue_valid_queues_weak: + "\ valid_queues_no_bitmap_except t and valid_bitmapQ and + bitmapQ_no_L2_orphans and bitmapQ_no_L1_orphans and + correct_queue t and + obj_at' (\tcb. tcbDomain tcb \ maxDomain \ tcbPriority tcb \ maxPriority) t \ + tcbSchedDequeue t + \\_. Invariants_H.valid_queues\" +proof - + show ?thesis + unfolding tcbSchedDequeue_def null_def valid_queues_def + apply wp (* stops on threadSet *) + apply (rule hoare_post_eq[OF _ threadSet_valid_queues_dequeue_wp], + simp add: valid_queues_def) + apply (wp hoare_vcg_if_lift hoare_vcg_conj_lift hoare_vcg_imp_lift)+ + apply (wp hoare_vcg_imp_lift setQueue_valid_queues_no_bitmap_except_dequeue_wp + setQueue_valid_bitmapQ threadGet_const_tcb_at hoare_vcg_if_lift)+ + (* wp done *) + apply (normalise_obj_at') + apply (clarsimp simp: correct_queue_def) + apply (normalise_obj_at') + apply (fastforce simp add: valid_queues_no_bitmap_except_def valid_queues_no_bitmap_def elim: obj_at'_weaken)+ + done +qed + +lemma tcbSchedDequeue_valid_queues: + "\Invariants_H.valid_queues + and obj_at' (\tcb. tcbDomain tcb \ maxDomain) t + and obj_at' (\tcb. tcbPriority tcb \ maxPriority) t\ + tcbSchedDequeue t + \\_. 
Invariants_H.valid_queues\" + apply (rule hoare_pre, rule tcbSchedDequeue_valid_queues_weak) + apply (fastforce simp: valid_queues_def valid_queues_no_bitmap_def obj_at'_def inQ_def) + done + +lemma tcbSchedAppend_valid_queues'[wp]: + (* most of this is identical to tcbSchedEnqueue_valid_queues' in TcbAcc_R *) + "\valid_queues' and tcb_at' t\ tcbSchedAppend t \\_. valid_queues'\" + apply (simp add: tcbSchedAppend_def) + apply (rule hoare_pre) + apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_seq_ext) + apply (rename_tac queued) + apply (case_tac queued; simp_all add: unless_def when_def) + apply (wp threadSet_valid_queues' setQueue_valid_queues' | simp)+ + apply (subst conj_commute, wp) + apply (rule hoare_pre_post, assumption) + apply (clarsimp simp: addToBitmap_def modifyReadyQueuesL1Bitmap_def modifyReadyQueuesL2Bitmap_def + getReadyQueuesL1Bitmap_def getReadyQueuesL2Bitmap_def) + apply wp + apply fastforce + apply wp + apply (subst conj_commute) + apply clarsimp + apply (rule_tac Q="\rv. valid_queues' + and obj_at' (\obj. \ tcbQueued obj) t + and obj_at' (\obj. tcbPriority obj = prio) t + and obj_at' (\obj. tcbDomain obj = tdom) t + and (\s. t \ set (ksReadyQueues s (tdom, prio)))" + in hoare_post_imp) + apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def) + apply (wp setQueue_valid_queues' | simp | simp add: setQueue_def)+ + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def inQ_def valid_queues'_def) + apply (wp getObject_tcb_wp | simp add: threadGet_def)+ + apply (clarsimp simp: obj_at'_def) + done + +lemma threadSet_valid_queues'_dequeue: (* threadSet_valid_queues' is too weak for dequeue *) + "\\s. (\d p t'. obj_at' (inQ d p) t' s \ t' \ t \ t' \ set (ksReadyQueues s (d, p))) \ + obj_at' (inQ d p) t s \ + threadSet (tcbQueued_update (\_. False)) t + \\rv. valid_queues' \" + unfolding valid_queues'_def + apply (rule hoare_pre) + apply (wp hoare_vcg_all_lift) + apply (simp only: imp_conv_disj not_obj_at') + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift) + apply (simp add: not_obj_at') + apply (clarsimp simp: typ_at_tcb') + apply normalise_obj_at' + apply (fastforce elim: obj_at'_weaken simp: inQ_def) + done + +lemma setQueue_ksReadyQueues_lift: + "\ \s. P (s\ksReadyQueues := (ksReadyQueues s)((d, p) := ts)\) ts \ + setQueue d p ts + \ \_ s. P s (ksReadyQueues s (d,p))\" + unfolding setQueue_def + by (wp, clarsimp simp: fun_upd_def cong: if_cong) + +lemma tcbSchedDequeue_valid_queues'[wp]: + "\valid_queues' and tcb_at' t\ + tcbSchedDequeue t \\_. valid_queues'\" + unfolding tcbSchedDequeue_def + apply (rule_tac B="\rv. valid_queues' and obj_at' (\obj. tcbQueued obj = rv) t" + in hoare_seq_ext) + prefer 2 + apply (wp threadGet_const_tcb_at) + apply (fastforce simp: obj_at'_def) + apply clarsimp + apply (rename_tac queued) + apply (case_tac queued, simp_all) + apply wp + apply (rule_tac d=tdom and p=prio in threadSet_valid_queues'_dequeue) + apply (rule hoare_pre_post, assumption) + apply (wp | clarsimp simp: bitmap_fun_defs)+ + apply (wp hoare_vcg_all_lift setQueue_ksReadyQueues_lift) + apply clarsimp + apply (wp threadGet_obj_at' threadGet_const_tcb_at)+ + apply clarsimp + apply (rule context_conjI, clarsimp simp: obj_at'_def) + apply (clarsimp simp: valid_queues'_def obj_at'_def inQ_def|wp)+ + done + +lemma tcbSchedAppend_iflive'[wp]: + "\if_live_then_nonz_cap' and ex_nonz_cap_to' tcb\ + tcbSchedAppend tcb \\_. 
if_live_then_nonz_cap'\" + apply (simp add: tcbSchedAppend_def unless_def) + apply (wp threadSet_iflive' hoare_drop_imps | simp add: crunch_simps)+ + done + +lemma tcbSchedDequeue_iflive'[wp]: + "\if_live_then_nonz_cap'\ tcbSchedDequeue tcb \\_. if_live_then_nonz_cap'\" + apply (simp add: tcbSchedDequeue_def) + apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ + apply ((wp | clarsimp simp: bitmap_fun_defs)+)[1] (* deal with removeFromBitmap *) + apply (wp threadSet_iflive' hoare_when_weak_wp | simp add: crunch_simps)+ + apply (rule_tac Q="\rv. \" in hoare_post_imp, fastforce) + apply (wp | simp add: crunch_simps)+ + done + +crunches tcbSchedAppend, tcbSchedDequeue, tcbSchedEnqueue + for typ_at'[wp]: "\s. P (typ_at' T p s)" + and tcb_at'[wp]: "tcb_at' t" + and ctes_of[wp]: "\s. P (ctes_of s)" + and ksInterrupt[wp]: "\s. P (ksInterruptState s)" + and irq_states[wp]: valid_irq_states' + and irq_node'[wp]: "\s. P (irq_node' s)" + and ct'[wp]: "\s. P (ksCurThread s)" + and global_refs'[wp]: valid_global_refs' + and ifunsafe'[wp]: if_unsafe_then_cap' + and cap_to'[wp]: "ex_nonz_cap_to' p" + and state_refs_of'[wp]: "\s. P (state_refs_of' s)" + and state_hyp_refs_of'[wp]: "\s. P (state_hyp_refs_of' s)" + and idle'[wp]: valid_idle' + (simp: unless_def crunch_simps) + +lemma tcbSchedEnqueue_vms'[wp]: + "\valid_machine_state'\ tcbSchedEnqueue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedEnqueue_ksMachine) + done + +lemma tcbSchedEnqueue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedEnqueue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedEnqueue_def) + apply (wpsimp simp: unless_def)+ + done + +lemma ct_idle_or_in_cur_domain'_lift2: + "\ \t. \tcb_in_cur_domain' t\ f \\_. tcb_in_cur_domain' t\; + \P. \\s. P (ksCurThread s) \ f \\_ s. P (ksCurThread s) \; + \P. \\s. P (ksIdleThread s) \ f \\_ s. P (ksIdleThread s) \; + \P. \\s. P (ksSchedulerAction s) \ f \\_ s. P (ksSchedulerAction s) \\ + \ \ ct_idle_or_in_cur_domain'\ f \\_. ct_idle_or_in_cur_domain' \" + apply (unfold ct_idle_or_in_cur_domain'_def) + apply (rule hoare_lift_Pf2[where f=ksCurThread]) + apply (rule hoare_lift_Pf2[where f=ksSchedulerAction]) + including no_pre + apply (wp static_imp_wp hoare_vcg_disj_lift) + apply simp+ + done + +lemma tcbSchedEnqueue_invs'[wp]: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedEnqueue t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp tcbSchedEnqueue_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def + | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority split: thread_state.split_asm simp: valid_pspace'_def)+ + done + +crunch ksMachine[wp]: tcbSchedAppend "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedAppend_vms'[wp]: + "\valid_machine_state'\ tcbSchedAppend t \\_. 
valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedAppend_ksMachine) + done + +crunch pspace_domain_valid[wp]: tcbSchedAppend "pspace_domain_valid" + (simp: unless_def) + +crunch ksCurDomain[wp]: tcbSchedAppend "\s. P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedAppend "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedAppend "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbSchedAppend_tcbDomain[wp]: + "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ + tcbSchedAppend t + \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + apply (clarsimp simp: tcbSchedAppend_def) + apply (wpsimp simp: unless_def)+ + done + +lemma tcbSchedAppend_tcbPriority[wp]: + "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ + tcbSchedAppend t + \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + apply (clarsimp simp: tcbSchedAppend_def) + apply (wpsimp simp: unless_def)+ + done + +lemma tcbSchedAppend_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedAppend t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp+ + done + +crunch ksDomScheduleIdx[wp]: tcbSchedAppend "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +crunches tcbSchedAppend, tcbSchedDequeue + for gsUntypedZeroRanges[wp]: "\s. P (gsUntypedZeroRanges s)" + (simp: unless_def) + +crunches tcbSchedDequeue, tcbSchedAppend + for arch'[wp]: "\s. P (ksArchState s)" + +lemma tcbSchedAppend_sch_act_wf[wp]: + "\\s. sch_act_wf (ksSchedulerAction s) s\ tcbSchedAppend thread + \\rv s. sch_act_wf (ksSchedulerAction s) s\" + apply (simp add:tcbSchedAppend_def bitmap_fun_defs) + apply (wp unless_wp setQueue_sch_act threadGet_wp|simp)+ + apply (fastforce simp:typ_at'_def obj_at'_def) + done + +lemma tcbSchedAppend_invs'[wp]: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s = ResumeCurrentThread \ ksCurThread s \ t)\ + tcbSchedAppend t + \\_. invs'\" + apply (simp add: invs'_def valid_state'_def) + apply (rule hoare_pre) + apply (wp tcbSchedAppend_ct_not_inQ valid_irq_node_lift irqs_masked_lift hoare_vcg_disj_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def + | auto elim!: st_tcb_ex_cap'' valid_objs'_maxDomain valid_objs'_maxPriority + split: thread_state.split_asm + simp: valid_pspace'_def)+ + done + +lemma tcbSchedEnqueue_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedEnqueue t + \\_. invs'\" + by wpsimp + +lemma tcbSchedAppend_invs'_not_ResumeCurrentThread: + "\invs' + and st_tcb_at' runnable' t + and (\s. ksSchedulerAction s \ ResumeCurrentThread)\ + tcbSchedAppend t + \\_. invs'\" + by wpsimp + +lemma tcb_at'_has_tcbDomain: + "tcb_at' t s \ \p. obj_at' (\tcb. tcbDomain tcb = p) t s" + by (clarsimp simp add: obj_at'_def) + +crunch ksMachine[wp]: tcbSchedDequeue "\s. P (ksMachineState s)" + (simp: unless_def) + +lemma tcbSchedDequeue_vms'[wp]: + "\valid_machine_state'\ tcbSchedDequeue t \\_. valid_machine_state'\" + apply (simp add: valid_machine_state'_def pointerInUserData_def pointerInDeviceData_def) + apply (wp hoare_vcg_all_lift hoare_vcg_disj_lift tcbSchedDequeue_ksMachine) + done + +crunch pspace_domain_valid[wp]: tcbSchedDequeue "pspace_domain_valid" + +crunch ksCurDomain[wp]: tcbSchedDequeue "\s. 
P (ksCurDomain s)" +(simp: unless_def) + +crunch ksIdleThread[wp]: tcbSchedDequeue "\s. P (ksIdleThread s)" +(simp: unless_def) + +crunch ksDomSchedule[wp]: tcbSchedDequeue "\s. P (ksDomSchedule s)" +(simp: unless_def) + +lemma tcbSchedDequeue_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ tcbSchedDequeue t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +lemma tcbSchedDequeue_tcbDomain[wp]: + "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ + tcbSchedDequeue t + \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +lemma tcbSchedDequeue_tcbPriority[wp]: + "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ + tcbSchedDequeue t + \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + apply (clarsimp simp: tcbSchedDequeue_def) + apply (wp hoare_when_weak_wp | simp)+ + done + +crunch ksDomScheduleIdx[wp]: tcbSchedDequeue "\s. P (ksDomScheduleIdx s)" + (simp: unless_def) + +lemma tcbSchedDequeue_invs'[wp]: + "\invs' and tcb_at' t\ + tcbSchedDequeue t + \\_. invs'\" + unfolding invs'_def valid_state'_def + apply (rule hoare_pre) + apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + tcbSchedDequeue_valid_queues + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply (fastforce elim: valid_objs'_maxDomain valid_objs'_maxPriority simp: valid_pspace'_def)+ + done + +lemma setCurThread_corres: + "corres dc \ \ (modify (cur_thread_update (\_. t))) (setCurThread t)" + apply (unfold setCurThread_def) + apply (rule corres_modify) + apply (simp add: state_relation_def swp_def) + done + +crunches vcpuEnable, vcpuDisable, vcpuSave, vcpuRestore + for typ_at' [wp]: "\s. P (typ_at' T p s)" + (simp: crunch_simps + wp: crunch_wps getObject_inv loadObject_default_inv) + +lemma vcpuSwitch_typ_at'[wp]: + "\\s. P (typ_at' T p s)\ vcpuSwitch param_a \\_ s. P (typ_at' T p s) \" + by (wpsimp simp: vcpuSwitch_def modifyArchState_def | assumption)+ + +lemma arch_switch_thread_tcb_at'[wp]: + "\tcb_at' t\ Arch.switchToThread t \\_. tcb_at' t\" + by (unfold AARCH64_H.switchToThread_def, wp typ_at_lift_tcb') + +lemma [wp]: (* FIXME AARCH64 *) + "\pred_tcb_at' proj P t'\ updateASIDPoolEntry param_a param_b \\_. pred_tcb_at' proj P t'\" + sorry + +lemma [wp]: (* FIXME AARCH64 *) + "\pred_tcb_at' proj P t'\ loadVMID param_a \\_. pred_tcb_at' proj P t'\" + sorry + +lemma [wp]: (* FIXME AARCH64 *) + "\Invariants_H.valid_queues\ updateASIDPoolEntry param_a param_b \\_. Invariants_H.valid_queues\" + sorry + +lemma [wp]: (* FIXME AARCH64 *) + "\Invariants_H.valid_queues\ loadVMID param_a \\_. Invariants_H.valid_queues\" + sorry + +crunches setVMRoot + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +crunches vcpuSwitch + for pred_tcb_at'[wp]: "pred_tcb_at' proj P t'" + (simp: crunch_simps wp: crunch_wps) + +lemma Arch_switchToThread_pred_tcb'[wp]: + "Arch.switchToThread t \\s. P (pred_tcb_at' proj P' t' s)\" +proof - + have pos: "\P t t'. Arch.switchToThread t \pred_tcb_at' proj P t'\" + by (wpsimp wp: setVMRoot_obj_at simp: AARCH64_H.switchToThread_def) + show ?thesis + apply (rule P_bool_lift [OF pos]) + sorry (* FIXME AARCH64: where does this switchToThread_typ_at' come from? 
it's not in AInvs on any arch + by (rule lift_neg_pred_tcb_at' [OF ArchThreadDecls_H_AARCH64_H_switchToThread_typ_at' pos]) *) +qed + +crunches storeWordUser, setVMRoot, asUser, storeWordUser, Arch.switchToThread + for ksQ[wp]: "\s. P (ksReadyQueues s p)" + and ksIdleThread[wp]: "\s. P (ksIdleThread s)" + and valid_queues[wp]: "Invariants_H.valid_queues" + (wp: crunch_wps simp: crunch_simps) + +crunches arch_switch_to_thread + for pspace_aligned[wp]: pspace_aligned + and pspace_distinct[wp]: pspace_distinct + +lemma switchToThread_corres: + "corres dc (valid_arch_state and valid_objs + and valid_vspace_objs and pspace_aligned and pspace_distinct + and valid_vs_lookup and valid_global_objs + and unique_table_refs + and st_tcb_at runnable t and valid_etcbs and (\s. sym_refs (state_hyp_refs_of s))) + (no_0_obj' and Invariants_H.valid_queues) + (switch_to_thread t) (switchToThread t)" + (is "corres _ ?PA ?PH _ _") +proof - + have mainpart: "corres dc (?PA) (?PH) + (do y \ arch_switch_to_thread t; + y \ (tcb_sched_action tcb_sched_dequeue t); + modify (cur_thread_update (\_. t)) + od) + (do y \ Arch.switchToThread t; + y \ tcbSchedDequeue t; + setCurThread t + od)" + apply (rule corres_guard_imp) + apply (rule corres_split[OF arch_switchToThread_corres]) + apply (rule corres_split[OF tcbSchedDequeue_corres setCurThread_corres]) + apply (wp|clarsimp simp: tcb_at_is_etcb_at st_tcb_at_tcb_at)+ + done + + show ?thesis + apply - + apply (simp add: switch_to_thread_def Thread_H.switchToThread_def) + apply (rule corres_symb_exec_l [where Q = "\ s rv. (?PA and (=) rv) s", + OF corres_symb_exec_l [OF mainpart]]) + apply (auto intro: no_fail_pre [OF no_fail_assert] + no_fail_pre [OF no_fail_get] + dest: st_tcb_at_tcb_at [THEN get_tcb_at] | + simp add: assert_def | wp)+ + done +qed + +lemma tcb_at_idle_thread_lift: + assumes T: "\T' t. \typ_at T' t\ f \\rv. typ_at T' t\" + assumes I: "\P. \\s. P (idle_thread s)\ f \\rv s. P (idle_thread s)\" + shows "\\s. tcb_at (idle_thread s) s \ f \\rv s. tcb_at (idle_thread s) s\" + apply (simp add: tcb_at_typ) + apply (rule hoare_lift_Pf[where f=idle_thread]) + by (wpsimp wp: T I)+ + +lemma tcb_at'_ksIdleThread_lift: + assumes T: "\T' t. \typ_at' T' t\ f \\rv. typ_at' T' t\" + assumes I: "\P. \\s. P (ksIdleThread s)\ f \\rv s. P (ksIdleThread s)\" + shows "\\s. tcb_at' (ksIdleThread s) s \ f \\rv s. 
tcb_at' (ksIdleThread s) s\" + apply (simp add: tcb_at_typ_at') + apply (rule hoare_lift_Pf[where f=ksIdleThread]) + by (wpsimp wp: T I)+ + +crunches vcpu_update, vgic_update, vcpu_disable, vcpu_restore, vcpu_enable + for valid_asid_map[wp]: valid_asid_map + (simp: crunch_simps wp: crunch_wps) + +lemma arch_switchToIdleThread_corres: + "corres dc + (valid_arch_state and valid_objs and pspace_aligned and pspace_distinct + and valid_vspace_objs and valid_idle) + (no_0_obj') + arch_switch_to_idle_thread Arch.switchToIdleThread" + apply (simp add: arch_switch_to_idle_thread_def + AARCH64_H.switchToIdleThread_def) + apply (corressimp corres: getIdleThread_corres setVMRoot_corres + vcpuSwitch_corres[where vcpu=None, simplified]) + sorry (* FIXME AARCH64 need to deal with setGlobalUserVSpace_corres + apply (clarsimp simp: valid_idle_def valid_idle'_def pred_tcb_at_def obj_at_def is_tcb + valid_arch_state_asid_table valid_arch_state_global_arch_objs) + done *) + +lemma switchToIdleThread_corres: + "corres dc invs invs_no_cicd' switch_to_idle_thread switchToIdleThread" + apply (simp add: switch_to_idle_thread_def Thread_H.switchToIdleThread_def) + apply (rule corres_guard_imp) + apply (rule corres_split[OF getIdleThread_corres]) + apply (rule corres_split[OF arch_switchToIdleThread_corres]) + apply (unfold setCurThread_def) + apply (rule corres_trivial, rule corres_modify) + apply (simp add: state_relation_def cdt_relation_def) + apply (wp+, simp+) + apply (simp add: invs_unique_refs invs_valid_vs_lookup invs_valid_objs invs_valid_asid_map + invs_arch_state invs_valid_global_objs invs_psp_aligned invs_distinct + invs_valid_idle invs_vspace_objs) + apply (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_pspace'_def) + done + +lemma gq_sp: "\P\ getQueue d p \\rv. P and (\s. ksReadyQueues s (d, p) = rv)\" + by (unfold getQueue_def, rule gets_sp) + +lemma sch_act_wf: + "sch_act_wf sa s = ((\t. sa = SwitchToThread t \ st_tcb_at' runnable' t s \ + tcb_in_cur_domain' t s) \ + (sa = ResumeCurrentThread \ ct_in_state' activatable' s))" + by (case_tac sa, simp_all add: ) + +declare gq_wp[wp] +declare setQueue_obj_at[wp] + +lemma threadSet_timeslice_invs: + "\invs' and tcb_at' t\ threadSet (tcbTimeSlice_update b) t \\rv. invs'\" + by (wp threadSet_invs_trivial, simp_all add: inQ_def cong: conj_cong) + +lemma setCurThread_invs_no_cicd': + "\invs_no_cicd' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and tcb_in_cur_domain' t\ + setCurThread t + \\rv. invs'\" +proof - + have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" + apply (simp add: ct_not_inQ_def o_def) + done + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp add: all_invs_but_ct_idle_or_in_cur_domain'_def invs'_def cur_tcb'_def + valid_state'_def Invariants_H.valid_queues_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def valid_queues'_def ct_not_inQ_ct + ct_idle_or_in_cur_domain'_def + bitmapQ_defs valid_queues_no_bitmap_def + cong: option.case_cong) + done +qed + +(* Don't use this rule when considering the idle thread. The invariant ct_idle_or_in_cur_domain' + says that either "tcb_in_cur_domain' t" or "t = ksIdleThread s". + Use setCurThread_invs_idle_thread instead. *) +lemma setCurThread_invs: + "\invs' and st_tcb_at' activatable' t and obj_at' (\x. \ tcbQueued x) t and + tcb_in_cur_domain' t\ setCurThread t \\rv. 
invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd') + (simp add: invs'_to_invs_no_cicd'_def) + +lemma valid_queues_not_runnable_not_queued: + fixes s + assumes vq: "Invariants_H.valid_queues s" + and vq': "valid_queues' s" + and st: "st_tcb_at' (Not \ runnable') t s" + shows "obj_at' (Not \ tcbQueued) t s" +proof (rule ccontr) + assume "\ obj_at' (Not \ tcbQueued) t s" + moreover from st have "typ_at' TCBT t s" + by (rule pred_tcb_at' [THEN tcb_at_typ_at' [THEN iffD1]]) + ultimately have "obj_at' tcbQueued t s" + by (clarsimp simp: not_obj_at' comp_def) + + moreover + from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbPriority] + obtain p where tp: "obj_at' (\tcb. tcbPriority tcb = p) t s" + by clarsimp + + moreover + from st [THEN pred_tcb_at', THEN tcb_at'_has_tcbDomain] + obtain d where td: "obj_at' (\tcb. tcbDomain tcb = d) t s" + by clarsimp + + ultimately + have "t \ set (ksReadyQueues s (d, p))" using vq' + unfolding valid_queues'_def + apply - + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (drule_tac x=t in spec) + apply (erule impE) + apply (fastforce simp add: inQ_def obj_at'_def) + apply (assumption) + done + + with vq have "st_tcb_at' runnable' t s" + unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def + apply - + apply clarsimp + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp simp add: st_tcb_at'_def) + apply (drule(1) bspec) + apply (erule obj_at'_weakenE) + apply (clarsimp) + done + + with st show False + apply - + apply (drule(1) pred_tcb_at_conj') + apply (clarsimp) + done +qed + +(* + * The idle thread is not part of any ready queues. + *) +lemma idle'_not_tcbQueued': + assumes vq: "Invariants_H.valid_queues s" + and vq': "valid_queues' s" + and idle: "valid_idle' s" + shows "obj_at' (Not \ tcbQueued) (ksIdleThread s) s" +proof - + from idle have stidle: "st_tcb_at' (Not \ runnable') (ksIdleThread s) s" + by (clarsimp simp add: valid_idle'_def pred_tcb_at'_def obj_at'_def idle_tcb'_def) + with vq vq' show ?thesis + by (rule valid_queues_not_runnable_not_queued) +qed + +lemma setCurThread_invs_no_cicd'_idle_thread: + "\invs_no_cicd' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" +proof - + have ct_not_inQ_ct: "\s t . \ ct_not_inQ s; obj_at' (\x. \ tcbQueued x) t s\ \ ct_not_inQ (s\ ksCurThread := t \)" + apply (simp add: ct_not_inQ_def o_def) + done + have idle'_activatable': "\ s t. st_tcb_at' idle' t s \ st_tcb_at' activatable' t s" + apply (clarsimp simp: st_tcb_at'_def o_def obj_at'_def) + done + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) + apply (frule (2) idle'_not_tcbQueued'[simplified o_def]) + apply (clarsimp simp add: ct_not_inQ_ct idle'_activatable' + invs'_def cur_tcb'_def valid_state'_def valid_idle'_def + sch_act_wf ct_in_state'_def state_refs_of'_def + ps_clear_def valid_irq_node'_def + ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def + valid_queues_def bitmapQ_defs valid_queues_no_bitmap_def valid_queues'_def + pred_tcb_at'_def + cong: option.case_cong) + apply (clarsimp simp: obj_at'_def idle_tcb'_def ) + done +qed + +lemma setCurThread_invs_idle_thread: + "\invs' and (\s. t = ksIdleThread s) \ setCurThread t \\rv. invs'\" + by (rule hoare_pre, rule setCurThread_invs_no_cicd'_idle_thread) + (clarsimp simp: invs'_to_invs_no_cicd'_def all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma Arch_switchToThread_invs[wp]: + "\invs' and tcb_at' t\ Arch.switchToThread t \\rv. 
invs'\" + unfolding AARCH64_H.switchToThread_def by (wpsimp wp: getObject_tcb_hyp_sym_refs) + +lemma [wp]: (* FIXME AARCH64 *) + "\\s. P (ksCurDomain s)\ loadVMID param_a \\_ s. P (ksCurDomain s)\" + sorry + +lemma [wp]: (* FIXME AARCH64 *) + "\\s. P (ksCurDomain s)\ updateASIDPoolEntry param_a param_b \\_ s. P (ksCurDomain s)\" + sorry + +crunch ksCurDomain[wp]: "Arch.switchToThread" "\s. P (ksCurDomain s)" + (simp: crunch_simps wp: crunch_wps) + +lemma Arch_swichToThread_tcbDomain_triv[wp]: + "\ obj_at' (\tcb. P (tcbDomain tcb)) t' \ Arch.switchToThread t \ \_. obj_at' (\tcb. P (tcbDomain tcb)) t' \" + apply (clarsimp simp: AARCH64_H.switchToThread_def storeWordUser_def) + apply (wp hoare_drop_imp | simp)+ + done + +lemma Arch_swichToThread_tcbPriority_triv[wp]: + "\ obj_at' (\tcb. P (tcbPriority tcb)) t' \ Arch.switchToThread t \ \_. obj_at' (\tcb. P (tcbPriority tcb)) t' \" + apply (clarsimp simp: AARCH64_H.switchToThread_def storeWordUser_def) + apply (wp hoare_drop_imp | simp)+ + done + +lemma Arch_switchToThread_tcb_in_cur_domain'[wp]: + "\tcb_in_cur_domain' t'\ Arch.switchToThread t \\_. tcb_in_cur_domain' t' \" + apply (rule tcb_in_cur_domain'_lift) + apply wp+ + done + +lemma tcbSchedDequeue_not_tcbQueued: + "\ tcb_at' t \ tcbSchedDequeue t \ \_. obj_at' (\x. \ tcbQueued x) t \" + apply (simp add: tcbSchedDequeue_def) + apply (wp|clarsimp)+ + apply (rule_tac Q="\queued. obj_at' (\x. tcbQueued x = queued) t" in hoare_post_imp) + apply (clarsimp simp: obj_at'_def) + apply (wp threadGet_obj_at') + apply (simp) + done + +lemma asUser_tcbState_inv[wp]: + "asUser t m \obj_at' (P \ tcbState) t\" + apply (simp add: asUser_def tcb_in_cur_domain'_def threadGet_def) + apply (wp threadSet_obj_at'_strongish getObject_tcb_wp | wpc | simp | clarsimp simp: obj_at'_def)+ + done + +lemma Arch_switchToThread_obj_at[wp]: + "Arch.switchToThread t \obj_at' (P \ tcbState) t\" + by (wpsimp simp: AARCH64_H.switchToThread_def) + +declare doMachineOp_obj_at[wp] + +crunch valid_arch_state'[wp]: asUser "valid_arch_state'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_irq_states'[wp]: asUser "valid_irq_states'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_machine_state'[wp]: asUser "valid_machine_state'" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_queues'[wp]: asUser "valid_queues'" +(wp: crunch_wps simp: crunch_simps) + + +lemma asUser_valid_irq_node'[wp]: + "\\s. valid_irq_node' (irq_node' s) s\ asUser t (setRegister f r) + \\_ s. valid_irq_node' (irq_node' s) s\" + apply (rule_tac valid_irq_node_lift) + apply (simp add: asUser_def) + apply (wpsimp wp: crunch_wps)+ + done + +crunch irq_masked'_helper: asUser "\s. P (intStateIRQTable (ksInterruptState s))" +(wp: crunch_wps simp: crunch_simps) + +lemma asUser_irq_masked'[wp]: + "\irqs_masked'\ asUser t (setRegister f r) + \\_ . irqs_masked'\" + apply (rule irqs_masked_lift) + apply (rule asUser_irq_masked'_helper) + done + +lemma asUser_ct_not_inQ[wp]: + "\ct_not_inQ\ asUser t (setRegister f r) + \\_ . 
ct_not_inQ\" + apply (clarsimp simp: submonad_asUser.fn_is_sm submonad_fn_def) + apply (rule hoare_seq_ext)+ + prefer 4 + apply (rule stateAssert_sp) + prefer 3 + apply (rule gets_inv) + defer + apply (rule select_f_inv) + apply (case_tac x; simp) + apply (clarsimp simp: asUser_replace_def obj_at'_def fun_upd_def + split: option.split kernel_object.split) + apply wp + apply (clarsimp simp: ct_not_inQ_def obj_at'_def objBitsKO_def ps_clear_def dom_def) + apply (rule conjI; clarsimp; blast) + done + +crunch pspace_domain_valid[wp]: asUser "pspace_domain_valid" +(wp: crunch_wps simp: crunch_simps) + +crunch valid_dom_schedule'[wp]: asUser "valid_dom_schedule'" +(wp: crunch_wps simp: crunch_simps) + +crunch gsUntypedZeroRanges[wp]: asUser "\s. P (gsUntypedZeroRanges s)" + (wp: crunch_wps simp: unless_def) + +crunch ctes_of[wp]: asUser "\s. P (ctes_of s)" + (wp: crunch_wps simp: unless_def) + +lemmas asUser_cteCaps_of[wp] = cteCaps_of_ctes_of_lift[OF asUser_ctes_of] + +lemma asUser_utr[wp]: + "\untyped_ranges_zero'\ asUser f t \\_. untyped_ranges_zero'\" + apply (simp add: cteCaps_of_def) + apply (rule hoare_pre, wp untyped_ranges_zero_lift) + apply (simp add: o_def) + done + +(* FIXME AARCH64 ARM_HYP does not have this, adapted arch tcb parts, but might not be useful now + since it involves s, review *) +lemma threadSet_invs_no_cicd'_trivialT: + assumes x: "\tcb. \(getF,setF) \ ran tcb_cte_cases. getF (F tcb) = getF tcb" + assumes z: "\tcb. tcbState (F tcb) = tcbState tcb \ tcbDomain (F tcb) = tcbDomain tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes a: "\tcb. tcbBoundNotification (F tcb) = tcbBoundNotification tcb" + assumes w: "\tcb. is_aligned (tcbIPCBuffer tcb) msg_align_bits \ is_aligned (tcbIPCBuffer (F tcb)) msg_align_bits" + assumes v: "\tcb. tcbDomain tcb \ maxDomain \ tcbDomain (F tcb) \ maxDomain" + assumes u: "\tcb. tcbPriority tcb \ maxPriority \ tcbPriority (F tcb) \ maxPriority" + assumes b: "\tcb. tcbMCP tcb \ maxPriority \ tcbMCP (F tcb) \ maxPriority" + assumes v: "\tcb. atcbVCPUPtr (tcbArch (F tcb)) = atcbVCPUPtr (tcbArch tcb)" + shows + "\\s. invs_no_cicd' s \ + (\d p. (\tcb. inQ d p tcb \ \ inQ d p (F tcb)) \ t \ set (ksReadyQueues s (d, p))) \ + (\ko d p. ko_at' ko t s \ inQ d p (F ko) \ \ inQ d p ko \ t \ set (ksReadyQueues s (d, p))) \ + ((\tcb. \ tcbQueued tcb \ tcbQueued (F tcb)) \ ex_nonz_cap_to' t s \ t \ ksCurThread s) \ + (\tcb. tcbQueued (F tcb) \ ksSchedulerAction s = ResumeCurrentThread \ tcbQueued tcb \ t \ ksCurThread s)\ + threadSet F t + \\rv. invs_no_cicd'\" +proof - + from z have domains: "\tcb. tcbDomain (F tcb) = tcbDomain tcb" by blast + note threadSet_sch_actT_P[where P=False, simplified] + have y: "\tcb. 
tcb_st_refs_of' (tcbState (F tcb)) = tcb_st_refs_of' (tcbState tcb) \ + valid_tcb_state' (tcbState (F tcb)) = valid_tcb_state' (tcbState tcb)" + by (auto simp: z) + show ?thesis + apply (simp add: invs_no_cicd'_def valid_state'_def split del: if_split) + apply (rule hoare_pre) + apply (wp x w v u b + threadSet_valid_pspace'T + threadSet_sch_actT_P[where P=False, simplified] + threadSet_valid_queues + threadSet_state_refs_of'T[where f'=id] + threadSet_state_hyp_refs_of' + threadSet_iflive'T + threadSet_ifunsafe'T + threadSet_idle'T + threadSet_global_refsT + irqs_masked_lift + valid_irq_node_lift + valid_irq_handlers_lift'' + threadSet_ctes_ofT + threadSet_not_inQ + threadSet_ct_idle_or_in_cur_domain' + threadSet_valid_dom_schedule' + threadSet_valid_queues' + threadSet_cur + untyped_ranges_zero_lift + |clarsimp simp: y z a v domains cteCaps_of_def valid_arch_tcb'_def |rule refl)+ + apply (clarsimp simp: obj_at'_def pred_tcb_at'_def) + apply (clarsimp simp: cur_tcb'_def valid_irq_node'_def valid_queues'_def o_def) + by (fastforce simp: domains ct_idle_or_in_cur_domain'_def tcb_in_cur_domain'_def z a) +qed + +lemmas threadSet_invs_no_cicd'_trivial = + threadSet_invs_no_cicd'_trivialT [OF all_tcbI all_tcbI all_tcbI all_tcbI, OF ball_tcb_cte_casesI] + +lemma asUser_invs_no_cicd'[wp]: + "\invs_no_cicd'\ asUser t m \\rv. invs_no_cicd'\" + apply (simp add: asUser_def split_def) + apply (wp hoare_drop_imps | simp)+ + apply (wp threadSet_invs_no_cicd'_trivial hoare_drop_imps | simp)+ + done + +lemma Arch_switchToThread_invs_no_cicd': + "\invs_no_cicd'\ Arch.switchToThread t \\rv. invs_no_cicd'\" + apply (wpsimp wp: getObject_tcb_hyp_sym_refs setVMRoot_invs_no_cicd' + simp: AARCH64_H.switchToThread_def) + by (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def) + +lemma tcbSchedDequeue_invs_no_cicd'[wp]: + "\invs_no_cicd' and tcb_at' t\ + tcbSchedDequeue t + \\_. invs_no_cicd'\" + unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def + apply (wp tcbSchedDequeue_ct_not_inQ sch_act_wf_lift valid_irq_node_lift irqs_masked_lift + valid_irq_handlers_lift' cur_tcb_lift ct_idle_or_in_cur_domain'_lift2 + tcbSchedDequeue_valid_queues_weak + untyped_ranges_zero_lift + | simp add: cteCaps_of_def o_def)+ + apply clarsimp + apply (fastforce simp: valid_pspace'_def valid_queues_def + elim: valid_objs'_maxDomain valid_objs'_maxPriority intro: obj_at'_conjI) + done + +lemma switchToThread_invs_no_cicd': + "\invs_no_cicd' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ ThreadDecls_H.switchToThread t \\rv. invs' \" + apply (simp add: Thread_H.switchToThread_def) + apply (wp setCurThread_invs_no_cicd' tcbSchedDequeue_not_tcbQueued + Arch_switchToThread_invs_no_cicd' Arch_switchToThread_pred_tcb') + apply (auto elim!: pred_tcb'_weakenE) + done + +lemma switchToThread_invs[wp]: + "\invs' and st_tcb_at' runnable' t and tcb_in_cur_domain' t \ switchToThread t \\rv. invs' \" + apply (simp add: Thread_H.switchToThread_def ) + apply (wp threadSet_timeslice_invs setCurThread_invs + Arch_switchToThread_invs dmo_invs' + doMachineOp_obj_at tcbSchedDequeue_not_tcbQueued) + by (auto elim!: pred_tcb'_weakenE) + +lemma setCurThread_ct_in_state: + "\obj_at' (P \ tcbState) t\ setCurThread t \\rv. ct_in_state' P\" +proof - + show ?thesis + apply (simp add: setCurThread_def) + apply wp + apply (simp add: ct_in_state'_def pred_tcb_at'_def o_def) + done +qed + +lemma switchToThread_ct_in_state[wp]: + "\obj_at' (P \ tcbState) t\ switchToThread t \\rv. 
ct_in_state' P\" +proof - + show ?thesis + apply (simp add: Thread_H.switchToThread_def tcbSchedEnqueue_def unless_def) + apply (wp setCurThread_ct_in_state Arch_switchToThread_obj_at + | simp add: o_def cong: if_cong)+ + done +qed + +lemma setCurThread_obj_at[wp]: + "\obj_at' P addr\ setCurThread t \\rv. obj_at' P addr\" + apply (simp add: setCurThread_def) + apply wp + apply (fastforce intro: obj_at'_pspaceI) + done + +lemma dmo_cap_to'[wp]: + "\ex_nonz_cap_to' p\ + doMachineOp m + \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +lemma sct_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setCurThread t \\rv. ex_nonz_cap_to' p\" + apply (simp add: setCurThread_def) + apply (wp ex_nonz_cap_to_pres') + apply (clarsimp elim!: cte_wp_at'_pspaceI)+ + done + +lemma setVCPU_cap_to'[wp]: + "\ex_nonz_cap_to' p\ setObject p' (v::vcpu) \\rv. ex_nonz_cap_to' p\" + by (wp ex_nonz_cap_to_pres') + +crunches + vcpuDisable, vcpuRestore, vcpuEnable, vcpuSaveRegRange, vgicUpdateLR, vcpuSave, vcpuSwitch + for cap_to'[wp]: "ex_nonz_cap_to' p" + (ignore: doMachineOp wp: crunch_wps) + +lemma [wp]: (* FIXME AARCH64 *) + "\ex_nonz_cap_to' p\ updateASIDPoolEntry param_a param_b \\_. ex_nonz_cap_to' p\" + sorry +lemma [wp]: (* FIXME AARCH64 *) + "\ex_nonz_cap_to' p\ loadVMID param_a \\_. ex_nonz_cap_to' p\" + sorry +crunch cap_to'[wp]: "Arch.switchToThread" "ex_nonz_cap_to' p" + (simp: crunch_simps wp: crunch_wps) + +crunch cap_to'[wp]: switchToThread "ex_nonz_cap_to' p" + (simp: crunch_simps) + +lemma no_longer_inQ[simp]: + "\ inQ d p (tcbQueued_update (\x. False) tcb)" + by (simp add: inQ_def) + +lemma iflive_inQ_nonz_cap_strg: + "if_live_then_nonz_cap' s \ obj_at' (inQ d prio) t s + \ ex_nonz_cap_to' t s" + by (clarsimp simp: obj_at'_real_def inQ_def live'_def + elim!: if_live_then_nonz_capE' ko_wp_at'_weakenE) + +lemmas iflive_inQ_nonz_cap[elim] + = mp [OF iflive_inQ_nonz_cap_strg, OF conjI[rotated]] + +declare Cons_eq_tails[simp] + +crunch ksCurDomain[wp]: "ThreadDecls_H.switchToThread" "\s. P (ksCurDomain s)" + +(* FIXME move *) +lemma obj_tcb_at': + "obj_at' (\tcb::tcb. P tcb) t s \ tcb_at' t s" + by (clarsimp simp: obj_at'_def) + +lemma invs'_not_runnable_not_queued: + fixes s + assumes inv: "invs' s" + and st: "st_tcb_at' (Not \ runnable') t s" + shows "obj_at' (Not \ tcbQueued) t s" + apply (insert assms) + apply (rule valid_queues_not_runnable_not_queued) + apply (clarsimp simp add: invs'_def valid_state'_def)+ + done + +lemma valid_queues_not_tcbQueued_not_ksQ: + fixes s + assumes vq: "Invariants_H.valid_queues s" + and notq: "obj_at' (Not \ tcbQueued) t s" + shows "\d p. t \ set (ksReadyQueues s (d, p))" +proof (rule ccontr, simp , erule exE, erule exE) + fix d p + assume "t \ set (ksReadyQueues s (d, p))" + with vq have "obj_at' (inQ d p) t s" + unfolding Invariants_H.valid_queues_def valid_queues_no_bitmap_def + apply clarify + apply (drule_tac x=d in spec) + apply (drule_tac x=p in spec) + apply (clarsimp) + apply (drule(1) bspec) + apply (erule obj_at'_weakenE) + apply (simp) + done + hence "obj_at' tcbQueued t s" + apply (rule obj_at'_weakenE) + apply (simp only: inQ_def) + done + with notq show "False" + by (clarsimp simp: obj_at'_def) +qed + +lemma not_tcbQueued_not_ksQ: + fixes s + assumes "invs' s" + and "obj_at' (Not \ tcbQueued) t s" + shows "\d p. 
t \ set (ksReadyQueues s (d, p))" + apply (insert assms) + apply (clarsimp simp add: invs'_def valid_state'_def) + apply (drule(1) valid_queues_not_tcbQueued_not_ksQ) + apply (clarsimp) + done + +lemma ct_not_ksQ: + "\ invs' s; ksSchedulerAction s = ResumeCurrentThread \ + \ \p. ksCurThread s \ set (ksReadyQueues s p)" + apply (clarsimp simp: invs'_def valid_state'_def ct_not_inQ_def) + apply (frule(1) valid_queues_not_tcbQueued_not_ksQ) + apply (fastforce) + done + +lemma setThreadState_rct: + "\\s. (runnable' st \ ksCurThread s \ t) + \ ksSchedulerAction s = ResumeCurrentThread\ + setThreadState st t + \\_ s. ksSchedulerAction s = ResumeCurrentThread\" + apply (simp add: setThreadState_def) + apply (rule hoare_pre_disj') + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_tcbState_st_tcb_at' [where P=runnable'] + threadSet_nosch]]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift [OF isRunnable_const isRunnable_inv]]) + apply (clarsimp simp: when_def) + apply (case_tac x) + apply (clarsimp, wp)[1] + apply (clarsimp) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF threadSet_ct threadSet_nosch]]) + apply (rule hoare_seq_ext [OF _ isRunnable_inv]) + apply (rule hoare_seq_ext [OF _ + hoare_vcg_conj_lift + [OF gct_wp gct_wp]]) + apply (rename_tac ct) + apply (case_tac "ct\t") + apply (clarsimp simp: when_def) + apply (wp)[1] + apply (clarsimp) + done + +lemma bitmapQ_lookupBitmapPriority_simp: (* neater unfold, actual unfold is really ugly *) + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ \ + bitmapQ d (lookupBitmapPriority d s) s = + (ksReadyQueuesL1Bitmap s d !! word_log2 (ksReadyQueuesL1Bitmap s d) \ + ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d))) !! 
+ word_log2 (ksReadyQueuesL2Bitmap s (d, invertL1Index (word_log2 (ksReadyQueuesL1Bitmap s d)))))" + unfolding bitmapQ_def lookupBitmapPriority_def + apply (drule word_log2_nth_same, clarsimp) + apply (drule (1) bitmapQ_no_L1_orphansD, clarsimp) + apply (drule word_log2_nth_same, clarsimp) + apply (frule test_bit_size[where n="word_log2 (ksReadyQueuesL2Bitmap _ _)"]) + apply (clarsimp simp: numPriorities_def wordBits_def word_size) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (fastforce intro: unat_less_helper word_log2_max[THEN order_less_le_trans] + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (fastforce intro: unat_less_helper word_log2_max of_nat_mono_maybe + simp: wordRadix_def word_size l2BitmapSize_def')+ + apply (simp add: word_ao_dist) + apply (subst less_mask_eq) + apply (rule word_of_nat_less) + apply (fastforce intro: word_of_nat_less simp: wordRadix_def' unat_of_nat word_size)+ + done + +lemma bitmapQ_from_bitmap_lookup: + "\ ksReadyQueuesL1Bitmap s d \ 0 ; + valid_bitmapQ s ; bitmapQ_no_L1_orphans s + \ + \ bitmapQ d (lookupBitmapPriority d s) s" + apply (simp add: bitmapQ_lookupBitmapPriority_simp) + apply (drule word_log2_nth_same) + apply (drule (1) bitmapQ_no_L1_orphansD) + apply (fastforce dest!: word_log2_nth_same + simp: word_ao_dist lookupBitmapPriority_def word_size numPriorities_def + wordBits_def) + done + +lemma lookupBitmapPriority_obj_at': + "\ksReadyQueuesL1Bitmap s (ksCurDomain s) \ 0; valid_queues_no_bitmap s; valid_bitmapQ s; + bitmapQ_no_L1_orphans s\ + \ obj_at' (inQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) and runnable' \ tcbState) + (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s" + apply (drule (2) bitmapQ_from_bitmap_lookup) + apply (simp add: valid_bitmapQ_bitmapQ_simp) + apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)", simp) + apply (clarsimp, rename_tac t ts) + apply (drule cons_set_intro) + apply (drule (2) valid_queues_no_bitmap_objD) + done + +lemma bitmapL1_zero_ksReadyQueues: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s \ + \ (ksReadyQueuesL1Bitmap s d = 0) = (\p. 
ksReadyQueues s (d,p) = [])" + apply (cases "ksReadyQueuesL1Bitmap s d = 0") + apply (force simp add: bitmapQ_def valid_bitmapQ_def) + apply (fastforce dest: bitmapQ_from_bitmap_lookup simp: valid_bitmapQ_bitmapQ_simp) + done + +lemma prioToL1Index_le_mask: + "\ prioToL1Index p = prioToL1Index p' ; p && mask wordRadix \ p' && mask wordRadix \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (drule shiftr_eq_neg_mask_eq) + apply (metis add.commute word_and_le2 word_plus_and_or_coroll2 word_plus_mono_left) + done + +lemma prioToL1Index_le_index: + "\ prioToL1Index p \ prioToL1Index p' ; prioToL1Index p \ prioToL1Index p' \ + \ p \ p'" + unfolding prioToL1Index_def + apply (simp add: wordRadix_def word_le_nat_alt[symmetric]) + apply (erule (1) le_shiftr') + done + +lemma bitmapL1_highest_lookup: + "\ valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; + bitmapQ d p' s \ + \ p' \ lookupBitmapPriority d s" + apply (subgoal_tac "ksReadyQueuesL1Bitmap s d \ 0") + prefer 2 + apply (clarsimp simp add: bitmapQ_def) + apply (case_tac "prioToL1Index (lookupBitmapPriority d s) = prioToL1Index p'") + apply (rule prioToL1Index_le_mask, simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst mask_or_not_mask[where n=wordRadix and x=p', symmetric]) + apply (subst word_bw_comms(2)) (* || commute *) + apply (simp add: word_ao_dist mask_AND_NOT_mask mask_twice) + apply (subst less_mask_eq[where x="of_nat _"]) + apply (subst word_less_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max]) + apply (simp add: word_size wordRadix_def') + apply (subst word_le_nat_alt) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule word_log2_highest) + apply (subst (asm) prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (simp add: word_size wordRadix_def') + apply (drule (1) bitmapQ_no_L1_orphansD[where d=d and i="word_log2 _"]) + apply (simp add: l2BitmapSize_def') + apply simp + apply (rule prioToL1Index_le_index[rotated], simp) + apply (frule (2) bitmapQ_from_bitmap_lookup) + apply (clarsimp simp: bitmapQ_lookupBitmapPriority_simp) + apply (clarsimp simp: bitmapQ_def lookupBitmapPriority_def) + apply (subst prioToL1Index_l1IndexToPrio_or_id) + apply (subst unat_of_nat_eq) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size) + apply (rule order_less_le_trans[OF word_log2_max], simp add: word_size wordRadix_def') + apply (fastforce dest: bitmapQ_no_L1_orphansD + simp: wordBits_def numPriorities_def word_size wordRadix_def' l2BitmapSize_def') + apply (erule word_log2_highest) + done + +lemma bitmapQ_ksReadyQueuesI: + "\ bitmapQ d p s ; valid_bitmapQ s \ \ ksReadyQueues s (d, p) \ []" + unfolding valid_bitmapQ_def by simp + +lemma getReadyQueuesL2Bitmap_inv[wp]: + "\ P \ getReadyQueuesL2Bitmap d i \\_. P\" + unfolding getReadyQueuesL2Bitmap_def by wp + +lemma switchToThread_lookupBitmapPriority_wp: + "\\s. 
+lemma switchToThread_lookupBitmapPriority_wp:
+  "\<lbrace>\<lambda>s. invs_no_cicd' s \<and> bitmapQ (ksCurDomain s) (lookupBitmapPriority (ksCurDomain s) s) s \<and>
+        t = hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)) \<rbrace>
+   ThreadDecls_H.switchToThread t
+   \<lbrace>\<lambda>rv. invs'\<rbrace>"
+proof -
+  have switchToThread_pre:
+    "\<And>s p t.\<lbrakk> valid_queues s ; bitmapQ (ksCurDomain s) p s ; t = hd (ksReadyQueues s (ksCurDomain s,p)) \<rbrakk>
+            \<Longrightarrow> st_tcb_at' runnable' t s \<and> tcb_in_cur_domain' t s"
+    unfolding valid_queues_def
+    apply (clarsimp dest!: bitmapQ_ksReadyQueuesI)
+    apply (case_tac "ksReadyQueues s (ksCurDomain s, p)", simp)
+    apply (rename_tac t ts)
+    apply (drule_tac t=t and p=p and d="ksCurDomain s" in valid_queues_no_bitmap_objD)
+     apply simp
+    apply (fastforce elim: obj_at'_weaken simp: inQ_def tcb_in_cur_domain'_def st_tcb_at'_def)
+    done
+  thus ?thesis
+    by (wp switchToThread_invs_no_cicd') (fastforce dest: invs_no_cicd'_queues)
+qed
+
+lemma switchToIdleThread_invs_no_cicd':
+  "\<lbrace>invs_no_cicd'\<rbrace> switchToIdleThread \<lbrace>\<lambda>rv. invs'\<rbrace>"
+  apply (clarsimp simp: Thread_H.switchToIdleThread_def AARCH64_H.switchToIdleThread_def)
+  apply (wp setCurThread_invs_no_cicd'_idle_thread setVMRoot_invs_no_cicd' vcpuSwitch_it')
+  apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_idle'_def)
+  done
+
+crunch obj_at'[wp]: "Arch.switchToIdleThread" "obj_at' (P :: ('a :: no_vcpu) \<Rightarrow> bool) t"
+
+
+declare static_imp_conj_wp[wp_split del]
+
+lemma setCurThread_const:
+  "\<lbrace>\<lambda>_. P t \<rbrace> setCurThread t \<lbrace>\<lambda>_ s. P (ksCurThread s) \<rbrace>"
+  by (simp add: setCurThread_def | wp)+
+
+
+
+crunch it[wp]: switchToIdleThread "\<lambda>s. P (ksIdleThread s)"
+crunch it[wp]: switchToThread "\<lambda>s. P (ksIdleThread s)"
+
+lemma switchToIdleThread_curr_is_idle:
+  "\<lbrace>\<top>\<rbrace> switchToIdleThread \<lbrace>\<lambda>rv s. ksCurThread s = ksIdleThread s\<rbrace>"
+  apply (rule hoare_weaken_pre)
+   apply (wps switchToIdleThread_it)
+   apply (simp add: Thread_H.switchToIdleThread_def)
+   apply (wp setCurThread_const)
+  apply (simp)
+  done
+
+lemma chooseThread_it[wp]:
+  "\<lbrace>\<lambda>s. P (ksIdleThread s)\<rbrace> chooseThread \<lbrace>\<lambda>_ s. P (ksIdleThread s)\<rbrace>"
+  supply if_split[split del]
+  by (wpsimp simp: chooseThread_def curDomain_def bitmap_fun_defs)
+
+lemma threadGet_inv [wp]:
+  "\<lbrace>P\<rbrace> threadGet f t \<lbrace>\<lambda>rv. P\<rbrace>"
+  apply (simp add: threadGet_def)
+  apply (wp | simp)+
+  done
+
+lemma corres_split_sched_act:
+  "\<lbrakk>sched_act_relation act act';
+    corres r P P' f1 g1;
+    \<And>t. corres r (Q t) (Q' t) (f2 t) (g2 t);
+    corres r R R' f3 g3\<rbrakk>
+   \<Longrightarrow> corres r (case act of resume_cur_thread \<Rightarrow> P
+                           | switch_thread t \<Rightarrow> Q t
+                           | choose_new_thread \<Rightarrow> R)
+               (case act' of ResumeCurrentThread \<Rightarrow> P'
+                           | SwitchToThread t \<Rightarrow> Q' t
+                           | ChooseThread \<Rightarrow> R')
+               (case act of resume_cur_thread \<Rightarrow> f1
+                          | switch_thread t \<Rightarrow> f2 t
+                          | choose_new_thread \<Rightarrow> f3)
+               (case act' of ResumeCurrentThread \<Rightarrow> g1
+                           | ChooseNewThread \<Rightarrow> g3
+                           | SwitchToThread t \<Rightarrow> g2 t)"
+  apply (cases act)
+    apply (rule corres_guard_imp, force+)+
+  done
+
+lemma corres_assert_ret:
+  "corres dc (\<lambda>s. P) \<top> (assert P) (return ())"
+  apply (rule corres_no_failI)
+   apply simp
+  apply (simp add: assert_def return_def fail_def)
+  done
+
+lemma corres_assert_assume_l:
+  "corres dc P Q (f ()) g
+   \<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
+  by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
+
+lemma corres_assert_assume_r:
+  "corres dc P Q f (g ())
+   \<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
+  by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
+
+crunch cur[wp]: tcbSchedEnqueue cur_tcb'
+  (simp: unless_def)
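+
+(* The exs_valid (\<exists>\<lbrace>_\<rbrace>) lemmas below say the accessors have at least one execution
+   that leaves the state unchanged; this is the side condition corres_symb_exec_l'
+   needs to run them on the abstract side alone, e.g. in guarded_switch_to_corres. *)
+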
+lemma thread_get_exs_valid[wp]:
+  "tcb_at t s \<Longrightarrow> \<lbrace>(=) s\<rbrace> thread_get f t \<exists>\<lbrace>\<lambda>r. (=) s\<rbrace>"
+  apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def
+                        thread_get_def gets_the_def exs_valid_def gets_def
+                        get_def bind_def return_def split: option.splits)
+  apply (erule get_tcb_at)
+  done
+
+lemma gts_exs_valid[wp]:
+  "tcb_at t s \<Longrightarrow> \<lbrace>(=) s\<rbrace> get_thread_state t \<exists>\<lbrace>\<lambda>r. (=) s\<rbrace>"
+  apply (clarsimp simp: get_thread_state_def assert_opt_def fail_def
+                        thread_get_def gets_the_def exs_valid_def gets_def
+                        get_def bind_def return_def split: option.splits)
+  apply (erule get_tcb_at)
+  done
+
+lemma guarded_switch_to_corres:
+  "corres dc (valid_arch_state and valid_objs
+                and valid_vspace_objs and pspace_aligned and pspace_distinct
+                and valid_vs_lookup and valid_global_objs
+                and unique_table_refs
+                and st_tcb_at runnable t and valid_etcbs and (\<lambda>s. sym_refs (state_hyp_refs_of s)))
+             (no_0_obj' and Invariants_H.valid_queues)
+             (guarded_switch_to t) (switchToThread t)"
+  apply (simp add: guarded_switch_to_def)
+  apply (rule corres_guard_imp)
+    apply (rule corres_symb_exec_l'[OF _ gts_exs_valid])
+      apply (rule corres_assert_assume_l)
+      apply (rule switchToThread_corres)
+     apply (force simp: st_tcb_at_tcb_at)
+    apply (wp gts_st_tcb_at)
+   apply (force simp: st_tcb_at_tcb_at)+
+  done
+
+abbreviation "enumPrio \<equiv> [0.e.maxPriority]"
+
+lemma enumPrio_word_div:
+  fixes v :: "8 word"
+  assumes vlt: "unat v \<le> unat maxPriority"
+  shows "\<exists>xs ys. enumPrio = xs @ [v] @ ys \<and> (\<forall>x\<in>set xs. x < v) \<and> (\<forall>y\<in>set ys. v < y)"
+  apply (subst upto_enum_word)
+  apply (subst upt_add_eq_append'[where j="unat v"])
+    apply simp
+   apply (rule le_SucI)
+   apply (rule vlt)
+  apply (simp only: upt_conv_Cons vlt[simplified less_Suc_eq_le[symmetric]])
+  apply (intro exI conjI)
+    apply fastforce
+   apply clarsimp
+   apply (drule of_nat_mono_maybe[rotated, where 'a="8"])
+    apply (fastforce simp: vlt)
+   apply simp
+  apply (clarsimp simp: Suc_le_eq)
+  apply (erule disjE)
+   apply (drule of_nat_mono_maybe[rotated, where 'a="8"])
+    apply (simp add: maxPriority_def numPriorities_def)
+   apply (clarsimp simp: unat_of_nat_eq)
+  apply (erule conjE)
+  apply (drule_tac y="unat v" and x="x" in of_nat_mono_maybe[rotated, where 'a="8"])
+   apply (simp add: maxPriority_def numPriorities_def)+
+  done
+
+lemma curDomain_corres: "corres (=) \<top> \<top> (gets cur_domain) (curDomain)"
+  by (simp add: curDomain_def state_relation_def)
+
+lemma curDomain_corres':
+  "corres (=) \<top> (\<lambda>s. ksCurDomain s \<le> maxDomain)
+     (gets cur_domain) (if 1 < numDomains then curDomain else return 0)"
+  apply (case_tac "1 < numDomains"; simp)
+   apply (rule corres_guard_imp[OF curDomain_corres]; solves simp)
+  (* if we have only one domain, then we are in it *)
+  apply (clarsimp simp: return_def simpler_gets_def bind_def maxDomain_def
+                        state_relation_def corres_underlying_def)
+  done
+
+lemma lookupBitmapPriority_Max_eqI:
+  "\<lbrakk> valid_bitmapQ s ; bitmapQ_no_L1_orphans s ; ksReadyQueuesL1Bitmap s d \<noteq> 0 \<rbrakk>
+   \<Longrightarrow> lookupBitmapPriority d s = (Max {prio. ksReadyQueues s (d, prio) \<noteq> []})"
+  apply (rule Max_eqI[simplified eq_commute]; simp)
+   apply (fastforce simp: bitmapL1_highest_lookup valid_bitmapQ_bitmapQ_simp)
+  apply (metis valid_bitmapQ_bitmapQ_simp bitmapQ_from_bitmap_lookup)
+  done
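+
+(* lookupBitmapPriority_Max_eqI above is the bridge to the abstract scheduler, which
+   picks Max {prio. ready_queues s d prio \<noteq> []}; the corres lemma below relates
+   reading the L1 bitmap to reading a domain's abstract ready queues. *)
+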
+lemma corres_gets_queues_getReadyQueuesL1Bitmap:
+  "corres (\<lambda>qs l1. ((l1 = 0) = (\<forall>p. qs p = []))) \<top> valid_queues
+     (gets (\<lambda>s. ready_queues s d)) (getReadyQueuesL1Bitmap d)"
+  unfolding state_relation_def valid_queues_def getReadyQueuesL1Bitmap_def
+  by (clarsimp simp: bitmapL1_zero_ksReadyQueues ready_queues_relation_def)
+
+lemma guarded_switch_to_chooseThread_fragment_corres:
+  "corres dc
+     (P and st_tcb_at runnable t and invs and valid_sched)
+     (P' and st_tcb_at' runnable' t and invs_no_cicd')
+     (guarded_switch_to t)
+     (do runnable \<leftarrow> isRunnable t;
+         y \<leftarrow> assert runnable;
+         ThreadDecls_H.switchToThread t
+      od)"
+  unfolding guarded_switch_to_def isRunnable_def
+  apply simp
+  apply (rule corres_guard_imp)
+    apply (rule corres_split[OF getThreadState_corres])
+      apply (rule corres_assert_assume_l)
+      apply (rule corres_assert_assume_r)
+      apply (rule switchToThread_corres)
+     apply (wp gts_st_tcb_at)+
+   apply (clarsimp simp: st_tcb_at_tcb_at invs_def valid_state_def valid_pspace_def valid_sched_def
+                         invs_valid_vs_lookup invs_unique_refs)
+  apply (auto elim!: pred_tcb'_weakenE split: thread_state.splits
+              simp: pred_tcb_at' runnable'_def all_invs_but_ct_idle_or_in_cur_domain'_def)
+  done
+
+lemma bitmap_lookup_queue_is_max_non_empty:
+  "\<lbrakk> valid_queues s'; (s, s') \<in> state_relation; invs s;
+     ksReadyQueuesL1Bitmap s' (ksCurDomain s') \<noteq> 0 \<rbrakk>
+   \<Longrightarrow> ksReadyQueues s' (ksCurDomain s', lookupBitmapPriority (ksCurDomain s') s') =
+       max_non_empty_queue (ready_queues s (cur_domain s))"
+  unfolding all_invs_but_ct_idle_or_in_cur_domain'_def valid_queues_def
+  by (clarsimp simp add: max_non_empty_queue_def lookupBitmapPriority_Max_eqI
+                         state_relation_def ready_queues_relation_def)
+
+lemma ksReadyQueuesL1Bitmap_return_wp:
+  "\<lbrace>\<lambda>s. P (ksReadyQueuesL1Bitmap s d) s \<rbrace> getReadyQueuesL1Bitmap d \<lbrace>\<lambda>rv s. P rv s\<rbrace>"
+  unfolding getReadyQueuesL1Bitmap_def
+  by wp
+
+lemma ksReadyQueuesL1Bitmap_st_tcb_at':
+  "\<lbrakk> ksReadyQueuesL1Bitmap s (ksCurDomain s) \<noteq> 0 ; valid_queues s \<rbrakk>
+   \<Longrightarrow> st_tcb_at' runnable' (hd (ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s))) s"
+  apply (drule bitmapQ_from_bitmap_lookup; clarsimp simp: valid_queues_def)
+  apply (clarsimp simp add: valid_bitmapQ_bitmapQ_simp)
+  apply (case_tac "ksReadyQueues s (ksCurDomain s, lookupBitmapPriority (ksCurDomain s) s)")
+   apply simp
+  apply (simp add: valid_queues_no_bitmap_def)
+  apply (erule_tac x="ksCurDomain s" in allE)
+  apply (erule_tac x="lookupBitmapPriority (ksCurDomain s) s" in allE)
+  apply (clarsimp simp: st_tcb_at'_def)
+  apply (erule obj_at'_weaken)
+  apply simp
+  done
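+
+(* chooseThread consults curDomain only when 1 < numDomains; the next two lemmas
+   handle the return-0 short circuit and supply the ksCurDomain \<le> maxDomain bound
+   it needs. *)
+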
+lemma curDomain_or_return_0:
+  "\<lbrakk> \<lbrace>P\<rbrace> curDomain \<lbrace>\<lambda>rv s. Q rv s \<rbrace>; \<And>s. P s \<Longrightarrow> ksCurDomain s \<le> maxDomain \<rbrakk>
+   \<Longrightarrow> \<lbrace>P\<rbrace> if 1 < numDomains then curDomain else return 0 \<lbrace>\<lambda>rv s. Q rv s \<rbrace>"
+  apply (case_tac "1 < numDomains"; simp)
+  apply (simp add: valid_def curDomain_def simpler_gets_def return_def maxDomain_def)
+  done
+
+lemma invs_no_cicd_ksCurDomain_maxDomain':
+  "invs_no_cicd' s \<Longrightarrow> ksCurDomain s \<le> maxDomain"
+  unfolding invs_no_cicd'_def by simp
+
+lemma chooseThread_corres:
+  "corres dc (invs and valid_sched) (invs_no_cicd')
+     choose_thread chooseThread" (is "corres _ ?PREI ?PREH _ _")
+proof -
+  show ?thesis
+    unfolding choose_thread_def chooseThread_def
+    apply (simp only: return_bind Let_def)
+    apply (subst if_swap[where P="_ \<noteq> 0"]) (* put switchToIdleThread on first branch *)
+    apply (rule corres_guard_imp)
+      apply (rule corres_split[OF curDomain_corres'])
+        apply clarsimp
+        apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap])
+          apply (erule corres_if2[OF sym])
+           apply (rule switchToIdleThread_corres)
+          apply (rule corres_symb_exec_r)
+             apply (rule corres_symb_exec_r)
+                apply (rule_tac
+                         P="\<lambda>s. ?PREI s \<and> queues = ready_queues s (cur_domain s) \<and>
+                                st_tcb_at runnable (hd (max_non_empty_queue queues)) s" and
+                         P'="\<lambda>s. (?PREH s \<and> st_tcb_at' runnable' (hd queue) s) \<and>
+                                 l1 = ksReadyQueuesL1Bitmap s (ksCurDomain s) \<and>
+                                 l1 \<noteq> 0 \<and>
+                                 queue = ksReadyQueues s (ksCurDomain s,
+                                           lookupBitmapPriority (ksCurDomain s) s)" and
+                         F="hd queue = hd (max_non_empty_queue queues)" in corres_req)
+                 apply (fastforce dest!: invs_no_cicd'_queues simp: bitmap_lookup_queue_is_max_non_empty)
+                apply clarsimp
+                apply (rule corres_guard_imp)
+                  apply (rule_tac P=\<top> and P'=\<top> in guarded_switch_to_chooseThread_fragment_corres)
+                 apply (wpsimp simp: getQueue_def getReadyQueuesL2Bitmap_def)+
+      apply (clarsimp simp: if_apply_def2)
+      apply (wp hoare_vcg_conj_lift hoare_vcg_imp_lift ksReadyQueuesL1Bitmap_return_wp)
+     apply (wpsimp wp: curDomain_or_return_0 simp: curDomain_def)+
+       apply (fastforce simp: invs_no_cicd'_def)
+      apply (clarsimp simp: valid_sched_def DetSchedInvs_AI.valid_queues_def max_non_empty_queue_def)
+      apply (erule_tac x="cur_domain s" in allE)
+      apply (erule_tac x="Max {prio. ready_queues s (cur_domain s) prio \<noteq> []}" in allE)
+      apply (case_tac "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \<noteq> []})")
+       apply (clarsimp)
+       apply (subgoal_tac
+                "ready_queues s (cur_domain s) (Max {prio. ready_queues s (cur_domain s) prio \<noteq> []}) \<noteq> []")
+        apply (fastforce elim!: setcomp_Max_has_prop)+
+     apply (simp add: invs_no_cicd_ksCurDomain_maxDomain')
+    apply (clarsimp dest!: invs_no_cicd'_queues)
+    apply (fastforce intro: ksReadyQueuesL1Bitmap_st_tcb_at')
+    done
+qed
+
+lemma thread_get_comm:
+  "do x \<leftarrow> thread_get f p; y \<leftarrow> gets g; k x y od =
+   do y \<leftarrow> gets g; x \<leftarrow> thread_get f p; k x y od"
+  apply (rule ext)
+  apply (clarsimp simp add: gets_the_def assert_opt_def
+                            bind_def gets_def get_def return_def
+                            thread_get_def
+                            fail_def split: option.splits)
+  done
+
+lemma schact_bind_inside:
+  "do x \<leftarrow> f; (case act of resume_cur_thread \<Rightarrow> f1 x
+                         | switch_thread t \<Rightarrow> f2 t x
+                         | choose_new_thread \<Rightarrow> f3 x) od
+   = (case act of resume_cur_thread \<Rightarrow> (do x \<leftarrow> f; f1 x od)
+                | switch_thread t \<Rightarrow> (do x \<leftarrow> f; f2 t x od)
+                | choose_new_thread \<Rightarrow> (do x \<leftarrow> f; f3 x od))"
+  apply (case_tac act,simp_all)
+  done
+
+interpretation tcb_sched_action_extended: is_extended' "tcb_sched_action f a"
+  by (unfold_locales)
+
+lemma getDomainTime_corres:
+  "corres (=) \<top> \<top> (gets domain_time) getDomainTime"
+  by (simp add: getDomainTime_def state_relation_def)
+
+lemma nextDomain_corres:
+  "corres dc \<top> \<top> next_domain nextDomain"
+  apply (simp add: next_domain_def nextDomain_def)
+  apply (rule corres_modify)
+  apply (simp add: state_relation_def Let_def dschLength_def dschDomain_def)
+  done
+
+lemma next_domain_valid_sched[wp]:
+  "\<lbrace> valid_sched and (\<lambda>s. scheduler_action s = choose_new_thread)\<rbrace> next_domain \<lbrace> \<lambda>_. valid_sched \<rbrace>"
+  apply (simp add: next_domain_def Let_def)
+  apply (wp, simp add: valid_sched_def valid_sched_action_2_def ct_not_in_q_2_def)
+  apply (simp add:valid_blocked_2_def)
+  done
+
+lemma nextDomain_invs_no_cicd':
+  "\<lbrace> invs' and (\<lambda>s. ksSchedulerAction s = ChooseNewThread)\<rbrace> nextDomain \<lbrace> \<lambda>_. invs_no_cicd' \<rbrace>"
+  apply (simp add: nextDomain_def Let_def dschLength_def dschDomain_def)
+  apply wp
+  apply (clarsimp simp: invs'_def valid_state'_def valid_machine_state'_def
+                        ct_not_inQ_def cur_tcb'_def ct_idle_or_in_cur_domain'_def dschDomain_def
+                        all_invs_but_ct_idle_or_in_cur_domain'_def)
+  done
+
+lemma scheduleChooseNewThread_fragment_corres:
+  "corres dc (invs and valid_sched and (\<lambda>s. scheduler_action s = choose_new_thread))
+             (invs' and (\<lambda>s. ksSchedulerAction s = ChooseNewThread))
+     (do _ \<leftarrow> when (domainTime = 0) next_domain;
+         choose_thread
+      od)
+     (do _ \<leftarrow> when (domainTime = 0) nextDomain;
+         chooseThread
+      od)"
+  apply (subst bind_dummy_ret_val)
+  apply (subst bind_dummy_ret_val)
+  apply (rule corres_guard_imp)
+    apply (rule corres_split)
+       apply (rule corres_when, simp)
+       apply (rule nextDomain_corres)
+      apply simp
+      apply (rule chooseThread_corres)
+     apply (wp nextDomain_invs_no_cicd')+
+   apply (clarsimp simp: valid_sched_def invs'_def valid_state'_def
+                         all_invs_but_ct_idle_or_in_cur_domain'_def)+
+  done
+
+lemma scheduleSwitchThreadFastfail_corres:
+  "\<lbrakk> ct \<noteq> it \<longrightarrow> (tp = tp' \<and> cp = cp') ; ct = ct' ; it = it' \<rbrakk> \<Longrightarrow>
+   corres ((=)) (is_etcb_at ct) (tcb_at' ct)
+     (schedule_switch_thread_fastfail ct it cp tp)
+     (scheduleSwitchThreadFastfail ct' it' cp' tp')"
+  by (clarsimp simp: schedule_switch_thread_fastfail_def scheduleSwitchThreadFastfail_def)
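+
+(* On the abstract side, is_highest_prio d p says p dominates every non-empty queue
+   of domain d (or all its queues are empty); the expansion below exposes exactly
+   the Max formulation that lookupBitmapPriority_Max_eqI relates to the bitmaps. *)
+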
+lemma gets_is_highest_prio_expand:
+  "gets (is_highest_prio d p) \<equiv> do
+     q \<leftarrow> gets (\<lambda>s. ready_queues s d);
+     return ((\<forall>p. q p = []) \<or> Max {prio. q prio \<noteq> []} \<le> p)
+   od"
+  by (clarsimp simp: is_highest_prio_def gets_def)
+
+lemma isHighestPrio_corres:
+  assumes "d' = d"
+  assumes "p' = p"
+  shows
+    "corres ((=)) \<top> valid_queues
+       (gets (is_highest_prio d p))
+       (isHighestPrio d' p')"
+  using assms
+  apply (clarsimp simp: gets_is_highest_prio_expand isHighestPrio_def)
+  apply (subst getHighestPrio_def')
+  apply (rule corres_guard_imp)
+    apply (rule corres_split[OF corres_gets_queues_getReadyQueuesL1Bitmap])
+      apply (rule corres_if_r'[where P'="\<lambda>_. True",rotated])
+       apply (rule_tac corres_symb_exec_r)
+          apply (rule_tac
+                   P="\<lambda>s. q = ready_queues s d" and
+                   P'="\<lambda>s. valid_queues s \<and>
+                           l1 = ksReadyQueuesL1Bitmap s d \<and>
+                           l1 \<noteq> 0 \<and> hprio = lookupBitmapPriority d s" and
+                   F="hprio = Max {prio. q prio \<noteq> []}" in corres_req)
+           apply (elim conjE)
+           apply (clarsimp simp: valid_queues_def)
+           apply (subst lookupBitmapPriority_Max_eqI; blast?)
+           apply (fastforce simp: ready_queues_relation_def dest!: state_relationD)
+          apply fastforce
+         apply (wpsimp simp: if_apply_def2 wp: hoare_drop_imps ksReadyQueuesL1Bitmap_return_wp)+
+  done
+
+crunch valid_idle_etcb[wp]: set_scheduler_action valid_idle_etcb
+
+crunch inv[wp]: isHighestPrio P
+crunch inv[wp]: curDomain P
+crunch inv[wp]: scheduleSwitchThreadFastfail P
+
+lemma setSchedulerAction_invs': (* not in wp set, clobbered by ssa_wp *)
+  "\<lbrace>\<lambda>s. invs' s \<rbrace> setSchedulerAction ChooseNewThread \<lbrace>\<lambda>_. invs' \<rbrace>"
+  by (wpsimp simp: invs'_def cur_tcb'_def valid_state'_def valid_irq_node'_def ct_not_inQ_def
+                   valid_queues_def valid_queues_no_bitmap_def valid_queues'_def
+                   ct_idle_or_in_cur_domain'_def)
+
+lemma scheduleChooseNewThread_corres:
+  "corres dc
+     (\<lambda>s. invs s \<and> valid_sched s \<and> scheduler_action s = choose_new_thread)
+     (\<lambda>s. invs' s \<and> ksSchedulerAction s = ChooseNewThread)
+     schedule_choose_new_thread scheduleChooseNewThread"
+  unfolding schedule_choose_new_thread_def scheduleChooseNewThread_def
+  apply (rule corres_guard_imp)
+    apply (rule corres_split[OF getDomainTime_corres], clarsimp)
+      apply (rule corres_split[OF scheduleChooseNewThread_fragment_corres, simplified bind_assoc])
+        apply (rule setSchedulerAction_corres)
+        apply (wp | simp)+
+    apply (wp | simp add: getDomainTime_def)+
+   apply auto
+  done
+
+lemma ethread_get_when_corres:
+  assumes x: "\<And>etcb tcb'. etcb_relation etcb tcb' \<Longrightarrow> r (f etcb) (f' tcb')"
+  shows "corres (\<lambda>rv rv'. b \<longrightarrow> r rv rv') (is_etcb_at t) (tcb_at' t)
+           (ethread_get_when b f t) (threadGet f' t)"
+  apply (clarsimp simp: ethread_get_when_def)
+  apply (rule conjI; clarsimp)
+   apply (rule corres_guard_imp, rule ethreadget_corres; simp add: x)
+  apply (clarsimp simp: threadGet_def)
+  apply (rule corres_noop)
+   apply wpsimp+
+  done
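+
+(* Main scheduler correspondence. The proof follows the scheduler action cases:
+   resume_cur_thread is trivial, switch_thread runs the fastfail/highest-priority
+   checks and either re-enqueues and chooses anew or performs guarded_switch_to,
+   and choose_new_thread defers to scheduleChooseNewThread_corres. *)
+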
+lemma schedule_corres:
+  "corres dc (invs and valid_sched and valid_list) invs' (Schedule_A.schedule) ThreadDecls_H.schedule"
+  supply ethread_get_wp[wp del]
+  supply ssa_wp[wp del]
+  supply tcbSchedEnqueue_invs'[wp del]
+  supply tcbSchedEnqueue_invs'_not_ResumeCurrentThread[wp del]
+  supply setSchedulerAction_direct[wp]
+  supply if_split[split del]
+
+  apply (clarsimp simp: Schedule_A.schedule_def Thread_H.schedule_def)
+  apply (subst thread_get_test)
+  apply (subst thread_get_comm)
+  apply (subst schact_bind_inside)
+  apply (rule corres_guard_imp)
+    apply (rule corres_split[OF getCurThread_corres[THEN corres_rel_imp[where r="\<lambda>x y. y = x"],simplified]])
+      apply (rule corres_split[OF getSchedulerAction_corres])
+        apply (rule corres_split_sched_act,assumption)
+          apply (rule_tac P="tcb_at ct" in corres_symb_exec_l')
+            apply (rule_tac corres_symb_exec_l)
+              apply simp
+              apply (rule corres_assert_ret)
+             apply ((wpsimp wp: thread_get_wp' gets_exs_valid)+)
+         prefer 2
+         (* choose thread *)
+         apply clarsimp
+         apply (rule corres_split[OF thread_get_isRunnable_corres])
+           apply (rule corres_split)
+              apply (rule corres_when, simp)
+              apply (rule tcbSchedEnqueue_corres)
+             apply (rule scheduleChooseNewThread_corres, simp)
+            apply (wp thread_get_wp' tcbSchedEnqueue_invs' hoare_vcg_conj_lift hoare_drop_imps
+                   | clarsimp)+
+        (* switch to thread *)
+        apply (rule corres_split[OF thread_get_isRunnable_corres],
+               rename_tac was_running wasRunning)
+          apply (rule corres_split)
+             apply (rule corres_when, simp)
+             apply (rule tcbSchedEnqueue_corres)
+            apply (rule corres_split[OF getIdleThread_corres], rename_tac it it')
+              apply (rule_tac F="was_running \<longrightarrow> ct \<noteq> it" in corres_gen_asm)
+              apply (rule corres_split)
+                 apply (rule ethreadget_corres[where r="(=)"])
+                 apply (clarsimp simp: etcb_relation_def)
+                apply (rename_tac tp tp')
+                apply (rule corres_split)
+                   apply (rule ethread_get_when_corres[where r="(=)"])
+                   apply (clarsimp simp: etcb_relation_def)
+                  apply (rename_tac cp cp')
+                  apply (rule corres_split)
+                     apply (rule scheduleSwitchThreadFastfail_corres; simp)
+                    apply (rule corres_split[OF curDomain_corres])
+                      apply (rule corres_split[OF isHighestPrio_corres]; simp only:)
+                        apply (rule corres_if, simp)
+                         apply (rule corres_split[OF tcbSchedEnqueue_corres])
+                           apply (simp, fold dc_def)
+                           apply (rule corres_split)
+                              apply (rule setSchedulerAction_corres; simp)
+                             apply (rule scheduleChooseNewThread_corres)
+                            apply (wp | simp)+
+                            apply (simp add: valid_sched_def)
+                            apply wp
+                            apply (rule hoare_vcg_conj_lift)
+                             apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked')
+                            apply (wpsimp wp: setSchedulerAction_invs')+
+                          apply (wp tcb_sched_action_enqueue_valid_blocked hoare_vcg_all_lift enqueue_thread_queued)
+                         apply (wp tcbSchedEnqueue_invs'_not_ResumeCurrentThread)
+                        apply (rule corres_if, fastforce)
+                         apply (rule corres_split[OF tcbSchedAppend_corres])
+                           apply (simp, fold dc_def)
+                           apply (rule corres_split)
+                              apply (rule setSchedulerAction_corres; simp)
+                             apply (rule scheduleChooseNewThread_corres)
+                            apply (wp | simp)+
+                            apply (simp add: valid_sched_def)
+                            apply wp
+                            apply (rule hoare_vcg_conj_lift)
+                             apply (rule_tac t=t in set_scheduler_action_cnt_valid_blocked')
+                            apply (wpsimp wp: setSchedulerAction_invs')+
+                          apply (wp tcb_sched_action_append_valid_blocked hoare_vcg_all_lift append_thread_queued)
+                         apply (wp tcbSchedAppend_invs'_not_ResumeCurrentThread)
+
+                        apply (rule corres_split[OF guarded_switch_to_corres], simp)
+                          apply (rule setSchedulerAction_corres[simplified dc_def])
+                         apply (wp | simp)+
+
+                      (* isHighestPrio *)
+                      apply (clarsimp simp: if_apply_def2)
+                      apply ((wp (once) hoare_drop_imp)+)[1]
+
+                     apply (simp add: if_apply_def2)
+                     apply ((wp (once) hoare_drop_imp)+)[1]
+                    apply wpsimp+
+
+          apply (clarsimp simp: conj_ac cong: conj_cong)
+          apply wp
+          apply (rule_tac Q="\<lambda>_ s. valid_blocked_except t s \<and> scheduler_action s = switch_thread t"
+                 in hoare_post_imp, fastforce)
+          apply (wp add: tcb_sched_action_enqueue_valid_blocked_except
+                         tcbSchedEnqueue_invs'_not_ResumeCurrentThread thread_get_wp
+                 del: gets_wp)+
+       apply (clarsimp simp: conj_ac if_apply_def2 cong: imp_cong conj_cong del: hoare_gets)
+       apply (wp gets_wp)+
+
+   (* abstract final subgoal *)
+   apply clarsimp
+
+   subgoal for s
+     apply (clarsimp split: Deterministic_A.scheduler_action.splits
+                     simp: invs_psp_aligned invs_distinct invs_valid_objs invs_arch_state
+                           invs_vspace_objs[simplified] tcb_at_invs)
+     apply (rule conjI, clarsimp)
+      apply (fastforce simp: invs_def
+                             valid_sched_def valid_sched_action_def is_activatable_def
+                             st_tcb_at_def obj_at_def valid_state_def only_idle_def)
+     apply (rule conjI, clarsimp)
+      subgoal for candidate
+        apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def
+                              valid_arch_caps_def valid_sched_action_def
+                              weak_valid_sched_action_def tcb_at_is_etcb_at
+                              tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]]
+                              valid_blocked_except_def valid_blocked_def invs_hyp_sym_refs)
+        apply (clarsimp simp add: pred_tcb_at_def obj_at_def is_tcb valid_idle_def)
+        done
+     (* choose new thread case *)
+     apply (intro impI conjI allI tcb_at_invs
+            | fastforce simp: invs_def cur_tcb_def valid_etcbs_def
+                              valid_sched_def st_tcb_at_def obj_at_def valid_state_def
+                              weak_valid_sched_action_def not_cur_thread_def)+
+     apply (simp add: valid_sched_def valid_blocked_def valid_blocked_except_def)
+     done
+
+  (* haskell final subgoal *)
+  apply (clarsimp simp: if_apply_def2 invs'_def valid_state'_def
+                  cong: imp_cong split: scheduler_action.splits)
+  apply (fastforce simp: cur_tcb'_def valid_pspace'_def)
+  done
+
+lemma ssa_all_invs_but_ct_not_inQ':
+  "\<lbrace>all_invs_but_ct_not_inQ' and sch_act_wf sa and
+    (\<lambda>s. sa = ResumeCurrentThread \<longrightarrow> ksCurThread s = ksIdleThread s \<or> tcb_in_cur_domain' (ksCurThread s) s)\<rbrace>
+   setSchedulerAction sa \<lbrace>\<lambda>rv. all_invs_but_ct_not_inQ'\<rbrace>"
+proof -
+  show ?thesis
+    apply (simp add: setSchedulerAction_def)
+    apply wp
+    apply (clarsimp simp add: invs'_def valid_state'_def cur_tcb'_def
+                              Invariants_H.valid_queues_def
+                              state_refs_of'_def ps_clear_def
+                              valid_irq_node'_def valid_queues'_def
+                              tcb_in_cur_domain'_def ct_idle_or_in_cur_domain'_def
+                              bitmapQ_defs valid_queues_no_bitmap_def
+                        cong: option.case_cong)
+    done
+qed
+
+lemma ssa_ct_not_inQ:
+  "\<lbrace>\<lambda>s. sa = ResumeCurrentThread \<longrightarrow> obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s\<rbrace>
+   setSchedulerAction sa \<lbrace>\<lambda>rv. ct_not_inQ\<rbrace>"
+  by (simp add: setSchedulerAction_def ct_not_inQ_def, wp, clarsimp)
+
+lemma ssa_all_invs_but_ct_not_inQ''[simplified]:
+  "\<lbrace>\<lambda>s. (all_invs_but_ct_not_inQ' s \<and> sch_act_wf sa s)
+      \<and> (sa = ResumeCurrentThread \<longrightarrow> ksCurThread s = ksIdleThread s \<or> tcb_in_cur_domain' (ksCurThread s) s)
+      \<and> (sa = ResumeCurrentThread \<longrightarrow> obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s)\<rbrace>
+   setSchedulerAction sa \<lbrace>\<lambda>rv. invs'\<rbrace>"
+  apply (simp only: all_invs_but_not_ct_inQ_check' [symmetric])
+  apply (rule hoare_elim_pred_conj)
+  apply (wp hoare_vcg_conj_lift [OF ssa_all_invs_but_ct_not_inQ' ssa_ct_not_inQ])
+  apply (clarsimp)
+  done
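+
+(* ssa_invs' below assembles ssa_all_invs_but_ct_not_inQ' and ssa_ct_not_inQ (via
+   the [simplified] combination above) into the invs' rule for setSchedulerAction
+   used by schedule_invs' further down. *)
+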
+lemma ssa_invs':
+  "\<lbrace>invs' and sch_act_wf sa and
+    (\<lambda>s. sa = ResumeCurrentThread \<longrightarrow> ksCurThread s = ksIdleThread s \<or> tcb_in_cur_domain' (ksCurThread s) s) and
+    (\<lambda>s. sa = ResumeCurrentThread \<longrightarrow> obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s)\<rbrace>
+   setSchedulerAction sa \<lbrace>\<lambda>rv. invs'\<rbrace>"
+  apply (wp ssa_all_invs_but_ct_not_inQ'')
+  apply (clarsimp simp add: invs'_def valid_state'_def)
+  done
+
+lemma getDomainTime_wp[wp]:
+  "\<lbrace>\<lambda>s. P (ksDomainTime s) s \<rbrace> getDomainTime \<lbrace> P \<rbrace>"
+  unfolding getDomainTime_def
+  by wp
+
+lemma switchToThread_ct_not_queued_2:
+  "\<lbrace>invs_no_cicd' and tcb_at' t\<rbrace> switchToThread t \<lbrace>\<lambda>rv s. obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s\<rbrace>"
+  (is "\<lbrace>_\<rbrace> _ \<lbrace>\<lambda>_. ?POST\<rbrace>")
+  apply (simp add: Thread_H.switchToThread_def)
+  apply (wp)
+    apply (simp add: AARCH64_H.switchToThread_def setCurThread_def)
+    apply (wp tcbSchedDequeue_not_tcbQueued | simp )+
+  done
+
+lemma setCurThread_obj_at':
+  "\<lbrace> obj_at' P t \<rbrace> setCurThread t \<lbrace>\<lambda>rv s. obj_at' P (ksCurThread s) s \<rbrace>"
+proof -
+  show ?thesis
+    apply (simp add: setCurThread_def)
+    apply wp
+    apply (simp add: ct_in_state'_def st_tcb_at'_def)
+    done
+qed
+
+lemma switchToIdleThread_ct_not_queued_no_cicd':
+  "\<lbrace> invs_no_cicd' \<rbrace> switchToIdleThread \<lbrace>\<lambda>rv s. obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s \<rbrace>"
+  apply (simp add: Thread_H.switchToIdleThread_def)
+  apply (wp setCurThread_obj_at')
+  apply (rule idle'_not_tcbQueued')
+  apply (simp add: invs_no_cicd'_def)+
+  done
+
+lemma switchToIdleThread_activatable_2[wp]:
+  "\<lbrace>invs_no_cicd'\<rbrace> switchToIdleThread \<lbrace>\<lambda>rv. ct_in_state' activatable'\<rbrace>"
+  apply (simp add: Thread_H.switchToIdleThread_def
+                   AARCH64_H.switchToIdleThread_def)
+  apply (wp setCurThread_ct_in_state)
+  apply (clarsimp simp: all_invs_but_ct_idle_or_in_cur_domain'_def valid_state'_def valid_idle'_def
+                        pred_tcb_at'_def obj_at'_def idle_tcb'_def)
+  done
+
+lemma switchToThread_tcb_in_cur_domain':
+  "\<lbrace>tcb_in_cur_domain' thread\<rbrace>
+   ThreadDecls_H.switchToThread thread
+   \<lbrace>\<lambda>y s. tcb_in_cur_domain' (ksCurThread s) s\<rbrace>"
+  apply (simp add: Thread_H.switchToThread_def setCurThread_def)
+  apply (wpsimp wp: tcbSchedDequeue_not_tcbQueued)
+  done
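+
+(* The "posts" lemma below establishes all three chooseThread postconditions
+   (current thread not queued, activatable, idle or in current domain) over a single
+   case split on the L1 bitmap, so the individual corollaries that follow are
+   one-liners over it. *)
+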
+lemma chooseThread_invs_no_cicd'_posts: (* generic version *)
+  "\<lbrace> invs_no_cicd' \<rbrace> chooseThread
+   \<lbrace>\<lambda>rv s. obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s \<and>
+           ct_in_state' activatable' s \<and>
+           (ksCurThread s = ksIdleThread s \<or> tcb_in_cur_domain' (ksCurThread s) s) \<rbrace>"
+  (is "\<lbrace>_\<rbrace> _ \<lbrace>\<lambda>_. ?POST\<rbrace>")
+proof -
+  note switchToThread_invs[wp del]
+  note switchToThread_invs_no_cicd'[wp del]
+  note switchToThread_lookupBitmapPriority_wp[wp]
+  note assert_wp[wp del]
+  note if_split[split del]
+
+  (* if we only have one domain, we are in it *)
+  have one_domain_case:
+    "\<And>s. \<lbrakk> invs_no_cicd' s; numDomains \<le> 1 \<rbrakk> \<Longrightarrow> ksCurDomain s = 0"
+    by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def)
+
+  show ?thesis
+    unfolding chooseThread_def Let_def curDomain_def
+    apply (simp only: return_bind, simp)
+    apply (rule hoare_seq_ext[where B="\<lambda>rv s. invs_no_cicd' s \<and> rv = ksCurDomain s"])
+     apply (rule_tac B="\<lambda>rv s. invs_no_cicd' s \<and> curdom = ksCurDomain s \<and>
+                               rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext)
+      apply (rename_tac l1)
+      apply (case_tac "l1 = 0")
+       (* switch to idle thread *)
+       apply simp
+       apply (rule hoare_pre)
+        apply (wp (once) switchToIdleThread_ct_not_queued_no_cicd')
+         apply (wp (once))
+         apply ((wp hoare_disjI1 switchToIdleThread_curr_is_idle)+)[1]
+       apply simp
+      (* we have a thread to switch to *)
+      apply (clarsimp simp: bitmap_fun_defs)
+      apply (wp assert_inv switchToThread_ct_not_queued_2 assert_inv hoare_disjI2
+                switchToThread_tcb_in_cur_domain')
+      apply clarsimp
+      apply (clarsimp dest!: invs_no_cicd'_queues
+                      simp: valid_queues_def lookupBitmapPriority_def[symmetric])
+      apply (drule (3) lookupBitmapPriority_obj_at')
+      apply normalise_obj_at'
+      apply (fastforce simp: tcb_in_cur_domain'_def inQ_def elim: obj_at'_weaken)
+     apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+
+    done
+qed
+
+lemma chooseThread_activatable_2:
+  "\<lbrace>invs_no_cicd'\<rbrace> chooseThread \<lbrace>\<lambda>rv. ct_in_state' activatable'\<rbrace>"
+  apply (rule hoare_pre, rule hoare_strengthen_post)
+    apply (rule chooseThread_invs_no_cicd'_posts)
+   apply simp+
+  done
+
+lemma chooseThread_ct_not_queued_2:
+  "\<lbrace> invs_no_cicd'\<rbrace> chooseThread \<lbrace>\<lambda>rv s. obj_at' (Not \<circ> tcbQueued) (ksCurThread s) s\<rbrace>"
+  (is "\<lbrace>_\<rbrace> _ \<lbrace>\<lambda>_. ?POST\<rbrace>")
+  apply (rule hoare_pre, rule hoare_strengthen_post)
+    apply (rule chooseThread_invs_no_cicd'_posts)
+   apply simp+
+  done
+
+lemma chooseThread_invs_no_cicd':
+  "\<lbrace> invs_no_cicd' \<rbrace> chooseThread \<lbrace>\<lambda>rv. invs' \<rbrace>"
+proof -
+  note switchToThread_invs[wp del]
+  note switchToThread_invs_no_cicd'[wp del]
+  note switchToThread_lookupBitmapPriority_wp[wp]
+  note assert_wp[wp del]
+  note if_split[split del]
+
+  (* if we only have one domain, we are in it *)
+  have one_domain_case:
+    "\<And>s. \<lbrakk> invs_no_cicd' s; numDomains \<le> 1 \<rbrakk> \<Longrightarrow> ksCurDomain s = 0"
+    by (simp add: all_invs_but_ct_idle_or_in_cur_domain'_def maxDomain_def)
+
+  (* NOTE: do *not* unfold numDomains in the rest of the proof,
+     it should work for any number *)
+
+  (* FIXME this is almost identical to the chooseThread_invs_no_cicd'_posts proof, can generalise? *)
+  show ?thesis
+    unfolding chooseThread_def Let_def curDomain_def
+    apply (simp only: return_bind, simp)
+    apply (rule hoare_seq_ext[where B="\<lambda>rv s. invs_no_cicd' s \<and> rv = ksCurDomain s"])
+     apply (rule_tac B="\<lambda>rv s. invs_no_cicd' s \<and> curdom = ksCurDomain s \<and>
+                               rv = ksReadyQueuesL1Bitmap s curdom" in hoare_seq_ext)
+      apply (rename_tac l1)
+      apply (case_tac "l1 = 0")
+       (* switch to idle thread *)
+       apply (simp, wp (once) switchToIdleThread_invs_no_cicd', simp)
+      (* we have a thread to switch to *)
+      apply (clarsimp simp: bitmap_fun_defs)
+      apply (wp assert_inv)
+      apply (clarsimp dest!: invs_no_cicd'_queues simp: valid_queues_def)
+      apply (fastforce elim: bitmapQ_from_bitmap_lookup simp: lookupBitmapPriority_def)
+     apply (wpsimp simp: bitmap_fun_defs curDomain_def one_domain_case)+
+    done
+qed
+
+lemma chooseThread_in_cur_domain':
+  "\<lbrace> invs_no_cicd' \<rbrace> chooseThread \<lbrace>\<lambda>rv s. ksCurThread s = ksIdleThread s \<or> tcb_in_cur_domain' (ksCurThread s) s\<rbrace>"
+  apply (rule hoare_pre, rule hoare_strengthen_post)
+    apply (rule chooseThread_invs_no_cicd'_posts, simp_all)
+  done
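+
+(* scheduleChooseNewThread sequences an optional nextDomain, chooseThread and
+   setSchedulerAction; its invs' proof below discharges the ssa_invs' side
+   conditions with the chooseThread postcondition lemmas just proved. *)
+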
+lemma scheduleChooseNewThread_invs':
+  "\<lbrace> invs' and (\<lambda>s. ksSchedulerAction s = ChooseNewThread) \<rbrace>
+   scheduleChooseNewThread
+   \<lbrace> \<lambda>_ s. invs' s \<rbrace>"
+  unfolding scheduleChooseNewThread_def
+  apply (wpsimp wp: ssa_invs' chooseThread_invs_no_cicd' chooseThread_ct_not_queued_2
+                    chooseThread_activatable_2 chooseThread_invs_no_cicd'
+                    chooseThread_in_cur_domain' nextDomain_invs_no_cicd' chooseThread_ct_not_queued_2)
+  apply (clarsimp simp: invs'_to_invs_no_cicd'_def)
+  done
+
+lemma schedule_invs':
+  "\<lbrace>invs'\<rbrace> ThreadDecls_H.schedule \<lbrace>\<lambda>rv. invs'\<rbrace>"
+  supply ssa_wp[wp del]
+  apply (simp add: schedule_def)
+  apply (rule_tac hoare_seq_ext, rename_tac t)
+   apply (wp, wpc)
+     \<comment> \<open>action = ResumeCurrentThread\<close>
+     apply (wp)[1]
+    \<comment> \<open>action = ChooseNewThread\<close>
+    apply (wp scheduleChooseNewThread_invs')
+   \<comment> \<open>action = SwitchToThread candidate\<close>
+   apply (wpsimp wp: scheduleChooseNewThread_invs' ssa_invs'
+                     chooseThread_invs_no_cicd' setSchedulerAction_invs' setSchedulerAction_direct
+                     switchToThread_tcb_in_cur_domain' switchToThread_ct_not_queued_2
+          | wp hoare_disjI2[where Q="\<lambda>_ s. tcb_in_cur_domain' (ksCurThread s) s"]
+          | wp hoare_drop_imp[where f="isHighestPrio d p" for d p]
+          | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def]
+          | strengthen invs'_invs_no_cicd
+          | wp hoare_vcg_imp_lift)+
+  apply (frule invs_sch_act_wf')
+  apply (auto simp: invs_sch_act_wf' obj_at'_activatable_st_tcb_at'
+                    st_tcb_at'_runnable_is_activatable)
+  done
+
+lemma setCurThread_nosch:
+  "\<lbrace>\<lambda>s. P (ksSchedulerAction s)\<rbrace>
+   setCurThread t
+   \<lbrace>\<lambda>rv s. P (ksSchedulerAction s)\<rbrace>"
+  apply (simp add: setCurThread_def)
+  apply wp
+  apply simp
+  done
+
+lemma stt_nosch:
+  "\<lbrace>\<lambda>s. P (ksSchedulerAction s)\<rbrace>
+   switchToThread t
+   \<lbrace>\<lambda>rv s. P (ksSchedulerAction s)\<rbrace>"
+  apply (simp add: Thread_H.switchToThread_def AARCH64_H.switchToThread_def storeWordUser_def)
+  apply (wp setCurThread_nosch hoare_drop_imp |simp)+
+  done
+
+lemma stit_nosch[wp]:
+  "\<lbrace>\<lambda>s. P (ksSchedulerAction s)\<rbrace>
+   switchToIdleThread
+   \<lbrace>\<lambda>rv s. P (ksSchedulerAction s)\<rbrace>"
+  apply (simp add: Thread_H.switchToIdleThread_def
+                   AARCH64_H.switchToIdleThread_def storeWordUser_def)
+  apply (wp setCurThread_nosch | simp add: getIdleThread_def)+
+  done
+
+lemma schedule_sch:
+  "\<lbrace>\<top>\<rbrace> schedule \<lbrace>\<lambda>rv s. ksSchedulerAction s = ResumeCurrentThread\<rbrace>"
+  by (wp setSchedulerAction_direct | wpc| simp add: schedule_def scheduleChooseNewThread_def)+
+
+lemma schedule_sch_act_simple:
+  "\<lbrace>\<top>\<rbrace> schedule \<lbrace>\<lambda>rv. sch_act_simple\<rbrace>"
+  apply (rule hoare_strengthen_post [OF schedule_sch])
+  apply (simp add: sch_act_simple_def)
+  done
+
+lemma ssa_ct:
+  "\<lbrace>ct_in_state' P\<rbrace> setSchedulerAction sa \<lbrace>\<lambda>rv. ct_in_state' P\<rbrace>"
+proof -
+  show ?thesis
+    apply (unfold setSchedulerAction_def)
+    apply wp
+    apply (clarsimp simp add: ct_in_state'_def pred_tcb_at'_def)
+    done
+qed
+
+lemma scheduleChooseNewThread_ct_activatable'[wp]:
+  "\<lbrace> invs' and (\<lambda>s. ksSchedulerAction s = ChooseNewThread) \<rbrace>
+   scheduleChooseNewThread
+   \<lbrace>\<lambda>_. ct_in_state' activatable'\<rbrace>"
+  unfolding scheduleChooseNewThread_def
+  by (wpsimp simp: ct_in_state'_def
+                wp: ssa_invs' nextDomain_invs_no_cicd'
+                    chooseThread_activatable_2[simplified ct_in_state'_def]
+      | (rule hoare_lift_Pf[where f=ksCurThread], solves wp)
+      | strengthen invs'_invs_no_cicd)+
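+
+(* Same branch structure as schedule_invs' above, this time establishing that the
+   current thread is activatable once the scheduler has run. *)
+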
+lemma schedule_ct_activatable'[wp]:
+  "\<lbrace>invs'\<rbrace> ThreadDecls_H.schedule \<lbrace>\<lambda>_. ct_in_state' activatable'\<rbrace>"
+  supply ssa_wp[wp del]
+  apply (simp add: schedule_def)
+  apply (rule_tac hoare_seq_ext, rename_tac t)
+   apply (wp, wpc)
+     \<comment> \<open>action = ResumeCurrentThread\<close>
+     apply (wp)[1]
+    \<comment> \<open>action = ChooseNewThread\<close>
+    apply wpsimp
+   \<comment> \<open>action = SwitchToThread\<close>
+   apply (wpsimp wp: ssa_invs' setSchedulerAction_direct ssa_ct
+          | wp hoare_drop_imp[where f="isHighestPrio d p" for d p]
+          | simp only: obj_at'_activatable_st_tcb_at'[simplified comp_def]
+          | strengthen invs'_invs_no_cicd
+          | wp hoare_vcg_imp_lift)+
+  apply (fastforce dest: invs_sch_act_wf' elim: pred_tcb'_weakenE
+                   simp: sch_act_wf obj_at'_activatable_st_tcb_at')
+  done
+
+lemma threadSet_sch_act_sane:
+  "\<lbrace>sch_act_sane\<rbrace> threadSet f t \<lbrace>\<lambda>_. sch_act_sane\<rbrace>"
+  by (wp sch_act_sane_lift)
+
+lemma rescheduleRequired_sch_act_sane[wp]:
+  "\<lbrace>\<top>\<rbrace> rescheduleRequired \<lbrace>\<lambda>rv. sch_act_sane\<rbrace>"
+  apply (simp add: rescheduleRequired_def sch_act_sane_def
+                   setSchedulerAction_def)
+  by (wp | wpc | clarsimp)+
+
+lemma sts_sch_act_sane:
+  "\<lbrace>sch_act_sane\<rbrace> setThreadState st t \<lbrace>\<lambda>_. sch_act_sane\<rbrace>"
+  apply (simp add: setThreadState_def)
+  including no_pre
+  apply (wp hoare_drop_imps
+         | simp add: threadSet_sch_act_sane)+
+  done
+
+lemma sbn_sch_act_sane:
+  "\<lbrace>sch_act_sane\<rbrace> setBoundNotification ntfn t \<lbrace>\<lambda>_. sch_act_sane\<rbrace>"
+  apply (simp add: setBoundNotification_def)
+  apply (wp | simp add: threadSet_sch_act_sane)+
+  done
+
+lemma possibleSwitchTo_corres:
+  "corres dc
+     (valid_etcbs and weak_valid_sched_action and cur_tcb and st_tcb_at runnable t
+      and pspace_aligned and pspace_distinct)
+     (valid_queues and valid_queues' and (\<lambda>s. weak_sch_act_wf (ksSchedulerAction s) s)
+      and valid_objs')
+     (possible_switch_to t)
+     (possibleSwitchTo t)"
+  supply ethread_get_wp[wp del]
+  apply (rule corres_cross_over_guard[where P'=Q and Q="tcb_at' t and Q" for Q])
+   apply (clarsimp simp: state_relation_def)
+   apply (rule tcb_at_cross, erule st_tcb_at_tcb_at; assumption)
+  apply (simp add: possible_switch_to_def possibleSwitchTo_def cong: if_cong)
+  apply (rule corres_guard_imp)
+    apply (rule corres_split[OF curDomain_corres], simp)
+      apply (rule corres_split)
+         apply (rule ethreadget_corres[where r="(=)"])
+         apply (clarsimp simp: etcb_relation_def)
+        apply (rule corres_split[OF getSchedulerAction_corres])
+          apply (rule corres_if, simp)
+           apply (rule tcbSchedEnqueue_corres)
+          apply (rule corres_if, simp)
+            apply (case_tac action; simp)
+           apply (rule corres_split[OF rescheduleRequired_corres])
+             apply (rule tcbSchedEnqueue_corres)
+            apply (wp rescheduleRequired_valid_queues'_weak)+
+          apply (rule setSchedulerAction_corres, simp)
+         apply (wpsimp simp: if_apply_def2
+                         wp: hoare_drop_imp[where f="ethread_get a b" for a b])+
+       apply (wp hoare_drop_imps)[1]
+      apply wp+
+   apply (clarsimp simp: valid_sched_def invs_def valid_state_def cur_tcb_def st_tcb_at_tcb_at
+                         valid_sched_action_def weak_valid_sched_action_def
+                         tcb_at_is_etcb_at[OF st_tcb_at_tcb_at[rotated]])
+  apply (simp add: tcb_at_is_etcb_at)
+  done
+
+end
+end