(*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *)

(* Higher level functions for manipulating virtual address spaces *)

(* NOTE(review): the Isabelle symbol markup in this file (e.g. \<Rightarrow>,
   \<leftarrow>, \<open>/\<close>, \<exists>, \<circ>, record-update brackets, \<catch>) appears to have
   been lossily collapsed — most symbols render as a bare backslash and the
   infix catch operator has vanished entirely (visible in
   find_vspace_for_asid_assert, set_vm_root and the unmap_* functions, where a
   handler like "(K $ return ())" follows a monadic block with no operator in
   between). The mangled tokens are preserved verbatim below; confirm against
   the upstream seL4 sources before processing this theory. *)

chapter "x64 VSpace Functions"

theory ArchVSpace_A
imports Retype_A
begin

context Arch begin global_naming X64_A

text \ These attributes are always set to @{const False} when pages are mapped. \

(* Attributes stripped from every small/large-page mapping. *)
definition "attr_mask = {Global,Dirty,PTAttr Accessed,PTAttr ExecuteDisable}"

(* As attr_mask, additionally stripping PAT; used for huge-page mappings below. *)
definition "attr_mask' = attr_mask \ {PAT}"

text \Save the set of entries that would be inserted into a page table or
page directory to map various different sizes of frame at a given virtual address.\

(* One clause per frame size: look up the slot at the appropriate paging level
   (PT for small, PD for large, PDPT for huge pages) and pair it with the
   entry to be written there. Lookup failures are converted to lookup faults. *)
primrec create_mapping_entries ::
  "paddr \ vspace_ref \ vmpage_size \ vm_rights \ frame_attrs \ obj_ref
     \ (vm_page_entry * obj_ref,'z::state_ext) se_monad"
where
  "create_mapping_entries base vptr X64SmallPage vm_rights attrib pd = doE
     p \ lookup_error_on_failure False $ lookup_pt_slot pd vptr;
     returnOk $ (VMPTE (SmallPagePTE base (attrib - attr_mask) vm_rights), p)
   odE"
| "create_mapping_entries base vptr X64LargePage vm_rights attrib pdpt = doE
     p \ lookup_error_on_failure False $ lookup_pd_slot pdpt vptr;
     returnOk $ (VMPDE (LargePagePDE base (attrib - attr_mask) vm_rights), p)
   odE"
| "create_mapping_entries base vptr X64HugePage vm_rights attrib pml4 = doE
     p \ lookup_error_on_failure False $ lookup_pdpt_slot pml4 vptr;
     returnOk $ (VMPDPTE (HugePagePDPTE base (attrib - attr_mask') vm_rights), p)
   odE"

text \This function checks that given entries are either invalid entries (for unmapping)
or replace invalid entries (for mapping).\

(* Small-page PTEs are always safe to write; large/huge entries are only safe
   if the slot currently holds an invalid entry or a frame entry of the same
   kind (never a page-table/page-directory pointer, which would orphan it).
   Writing a structure entry (PageTablePDE/PageDirectoryPDPTE) through this
   path is a specification error, hence fail. *)
fun ensure_safe_mapping ::
  "(vm_page_entry * obj_ref) \ (unit,'z::state_ext) se_monad"
where
  "ensure_safe_mapping (VMPTE InvalidPTE, _) = returnOk ()"
| "ensure_safe_mapping (VMPDE InvalidPDE, _) = returnOk ()"
| "ensure_safe_mapping (VMPDPTE InvalidPDPTE, _) = returnOk ()"
| "ensure_safe_mapping (VMPTE (SmallPagePTE _ _ _), pt_slot) = returnOk ()"
| "ensure_safe_mapping (VMPDE (LargePagePDE _ _ _), pd_slot) = doE
     pde \ liftE $ get_pde pd_slot;
     (case pde of
        InvalidPDE \ returnOk ()
      | LargePagePDE _ _ _ \ returnOk ()
      | _ \ throwError DeleteFirst)
   odE"
| "ensure_safe_mapping (VMPDPTE (HugePagePDPTE _ _ _), pdpt_slot) = doE
     pdpt \ liftE $ get_pdpte pdpt_slot;
     (case pdpt of
        InvalidPDPTE \ returnOk ()
      | HugePagePDPTE _ _ _ \ returnOk ()
      | _ \ throwError DeleteFirst)
   odE"
| "ensure_safe_mapping (VMPDE (PageTablePDE _ _ _), _) = fail"
| "ensure_safe_mapping (VMPDPTE (PageDirectoryPDPTE _ _ _), _) = fail"

text \Look up a thread's IPC buffer and check that the thread has the right
authority to read or (in the receiver case) write to it.\

(* The buffer frame capability lives in TCB CNode slot 4; the returned pointer
   is the frame base plus the buffer offset within the frame. Receivers need
   read-write rights; senders may also use a read-only buffer. *)
definition lookup_ipc_buffer ::
  "bool \ obj_ref \ (obj_ref option,'z::state_ext) s_monad"
where
  "lookup_ipc_buffer is_receiver thread \ do
     buffer_ptr \ thread_get tcb_ipc_buffer thread;
     buffer_frame_slot \ return (thread, tcb_cnode_index 4);
     buffer_cap \ get_cap buffer_frame_slot;
     (case buffer_cap of
        ArchObjectCap (PageCap _ p R _ vms _) \
          if vm_read_write \ R \ vm_read_only \ R \ \is_receiver
          then return $ Some $ p + (buffer_ptr && mask (pageBitsForSize vms))
          else return None
      | _ \ return None)
   od"

text \Locate the page directory associated with a given virtual ASID.\

(* Two-level lookup: the high ASID bits select a pool from the global ASID
   table, the low bits select the PML4 pointer within that pool. Either level
   being unmapped yields an InvalidRoot lookup failure. ASID 0 is reserved. *)
definition find_vspace_for_asid :: "asid \ (obj_ref,'z::state_ext) lf_monad" where
  "find_vspace_for_asid asid \ doE
     assertE (asid > 0);
     asid_table \ liftE $ gets (x64_asid_table \ arch_state);
     pool_ptr \ returnOk (asid_table (asid_high_bits_of asid));
     pool \ (case pool_ptr of
               Some ptr \ liftE $ get_asid_pool ptr
             | None \ throwError InvalidRoot);
     pml4 \ returnOk (pool (asid_low_bits_of asid));
     (case pml4 of
        Some ptr \ returnOk ptr
      | None \ throwError InvalidRoot)
   odE"

text \Locate the page directory and check that this process succeeds and
returns a pointer to a real page directory.\

(* Assertion form of the above: any lookup failure becomes fail, and get_pml4
   additionally asserts the returned pointer references a PML4 object.
   NOTE(review): the operator between "asid" and "K fail" (presumably the
   catch combinator) has been lost in the symbol mangling. *)
definition find_vspace_for_asid_assert :: "asid \ (obj_ref,'z::state_ext) s_monad" where
  "find_vspace_for_asid_assert asid \ do
     pml4 \ find_vspace_for_asid asid K fail;
     get_pml4 pml4;
     return pml4
   od"

text \Format a VM fault message to be passed to a thread's supervisor after
it encounters a page fault.\

(* The fault message carries the faulting address plus a flag word: element 0
   distinguishes data (0) from instruction (1) faults, element 1 is the low
   five bits of the architectural error code read from ErrorRegister. *)
definition handle_vm_fault ::
  "obj_ref \ vmfault_type \ (unit,'z::state_ext) f_monad"
where
  "handle_vm_fault thread fault_type = doE
     addr \ liftE $ do_machine_op getFaultAddress;
     fault \ liftE $ as_user thread $ getRegister ErrorRegister;
     case fault_type of
       X64DataFault \ throwError $ ArchFault $ VMFault addr [0, fault && mask 5]
     | X64InstructionFault \ throwError $ ArchFault $ VMFault addr [1, fault && mask 5]
   odE"

(* Read the cached CR3 value from the architecture-specific kernel state. *)
definition get_current_cr3 :: "(cr3, 'z::state_ext) s_monad" where
  "get_current_cr3 \ gets (x64_current_cr3 \ arch_state)"

(* Record a new CR3 value in the architecture-specific kernel state. *)
definition set_current_cr3 :: "cr3 \ (unit,'z::state_ext) s_monad" where
  "set_current_cr3 c \ modify (\s. s \arch_state := (arch_state s) \x64_current_cr3 := c\\)"

(* Flush cached page-structure translations for the given vspace/ASID on the
   local core. *)
definition invalidate_page_structure_cache_asid ::
  "obj_ref \ asid \ (unit, 'z::state_ext) s_monad"
where
  "invalidate_page_structure_cache_asid vspace asid \
     do_machine_op $ invalidateLocalPageStructureCacheASID vspace (ucast asid)"

(* The base address of the currently installed top-level paging structure. *)
definition getCurrentVSpaceRoot :: "(obj_ref, 'z::state_ext) s_monad" where
  "getCurrentVSpaceRoot \ do
     cur \ get_current_cr3;
     return $ cr3_base_address cur
   od"

(* Mask isolating the PML4 base-address field of a CR3 value. *)
definition "cr3_addr_mask \ mask pml4_shift_bits << asid_bits"

(* Build a CR3 value from a vspace root (masked to the address field) and an
   ASID (used here as the x64 PCID). *)
definition make_cr3 :: "obj_ref \ asid \ cr3" where
  "make_cr3 vspace asid \ cr3 (vspace && cr3_addr_mask) asid"

(* Install the given vspace root and ASID as the current CR3. *)
definition set_current_vspace_root :: "obj_ref \ asid \ (unit, 'z::state_ext) s_monad" where
  "set_current_vspace_root vspace asid \ set_current_cr3 $ make_cr3 vspace asid"

text \Switch into the address space of a given thread or the global address
space if none is correctly configured.\

(* The thread's vspace cap lives in TCB CNode slot 1. The CR3 write is skipped
   when the target CR3 is already current. On any failure (wrong cap type,
   stale ASID mapping) we fall back to the kernel's global PML4 with ASID 0.
   NOTE(review): the catch operator joining the case expression to the
   fallback "(\_. do ... od)" handler has been lost in the symbol mangling. *)
definition set_vm_root :: "obj_ref \ (unit,'z::state_ext) s_monad" where
  "set_vm_root tcb \ do
     thread_root_slot \ return (tcb, tcb_cnode_index 1);
     thread_root \ get_cap thread_root_slot;
     (case thread_root of
        ArchObjectCap (PML4Cap pml4 (Some asid)) \ doE
          pml4' \ find_vspace_for_asid asid;
          whenE (pml4 \ pml4') $ throwError InvalidRoot;
          cur_cr3 \ liftE $ get_current_cr3;
          whenE (cur_cr3 \ make_cr3 (addrFromPPtr pml4) asid) $
            liftE $ set_current_cr3 $ make_cr3 (addrFromPPtr pml4) asid
        odE
      | _ \ throwError InvalidRoot)
     (\_. do
        global_pml4 \ gets (x64_global_pml4 \ arch_state);
        set_current_vspace_root (addrFromKPPtr global_pml4) 0
     od)
   od"

text \Remove virtual to physical mappings in either direction involving this
virtual ASID.\

definition hw_asid_invalidate :: "asid \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "hw_asid_invalidate asid vspace \ do_machine_op $ invalidateASID vspace (ucast asid)"

(* Remove an entire ASID pool from the ASID table (if it is still the one
   registered for this base), invalidating the hardware translations of every
   ASID still mapped in it, then reinstall the current thread's address space
   since it may have been using one of the deleted ASIDs. *)
definition delete_asid_pool :: "asid \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "delete_asid_pool base ptr \ do
     assert (asid_low_bits_of base = 0);
     asid_table \ gets (x64_asid_table \ arch_state);
     when (asid_table (asid_high_bits_of base) = Some ptr) $ do
       pool \ get_asid_pool ptr;
       mapM (\offset. (when (pool (ucast offset) \ None) $
                         hw_asid_invalidate (base + offset) (the (pool (ucast offset)))))
            [0 .e. (1 << asid_low_bits) - 1];
       asid_table' \ return (asid_table (asid_high_bits_of base:= None));
       modify (\s. s \ arch_state := (arch_state s) \ x64_asid_table := asid_table' \\);
       tcb \ gets cur_thread;
       set_vm_root tcb
     od
   od"

text \When deleting a page map level 4 from an ASID pool we must deactivate it.\

(* Remove a single ASID-to-PML4 binding from its pool (only if the pool still
   maps this ASID to this PML4), invalidate the hardware translations for it,
   and reinstall the current thread's address space. *)
definition delete_asid :: "asid \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "delete_asid asid pml4 \ do
     asid_table \ gets (x64_asid_table \ arch_state);
     case asid_table (asid_high_bits_of asid) of
       None \ return ()
     | Some pool_ptr \ do
         pool \ get_asid_pool pool_ptr;
         when (pool (asid_low_bits_of asid) = Some pml4) $ do
           hw_asid_invalidate asid pml4;
           pool' \ return (pool (asid_low_bits_of asid := None));
           set_asid_pool pool_ptr pool';
           tcb \ gets cur_thread;
           set_vm_root tcb
         od
       od
   od"

(* Invalidate all translations tagged with this vspace/ASID; the per-structure
   flushes below are all implemented as this full-ASID invalidation. *)
definition flush_all :: "obj_ref \ asid \ (unit,'z::state_ext) s_monad" where
  "flush_all vspace asid \ do_machine_op $ invalidateASID vspace (ucast asid)"

abbreviation flush_pdpt :: "obj_ref \ asid \ (unit,'z::state_ext) s_monad" where
  "flush_pdpt \ flush_all"

abbreviation flush_pd :: "obj_ref \ asid \ (unit,'z::state_ext) s_monad" where
  "flush_pd \ flush_all"

text \Flush mappings associated with a page table.\

(* vptr must be aligned to the region a page table spans. Every valid PTE in
   the table has its corresponding virtual page invalidated individually. *)
definition flush_table :: "obj_ref \ vspace_ref \ obj_ref \ asid \ (unit,'z::state_ext) s_monad" where
  "flush_table pml4_ref vptr pt_ref asid \ do
     assert (vptr && mask (ptTranslationBits + pageBits) = 0);
     pt \ get_pt pt_ref;
     forM_x [0 .e. (-1::9 word)]
       (\index. do
          pte \ return $ pt index;
          case pte of
            InvalidPTE \ return ()
          | _ \ do_machine_op $
                  invalidateTranslationSingleASID (vptr + (ucast index << pageBits)) (ucast asid)
        od)
   od"

text \Unmap a Page Directory Pointer Table from a PML4.\

(* Only clears the PML4 slot if it still points at this PDPT; any mismatch or
   lookup failure is swallowed by the trailing (K $ return ()) handler.
   NOTE(review): the catch operator before that handler was lost in the
   symbol mangling; same applies to the other unmap_* functions below. *)
definition unmap_pdpt :: "asid \ vspace_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "unmap_pdpt asid vaddr pdpt \ doE
     vspace \ find_vspace_for_asid asid;
     pm_slot \ returnOk $ lookup_pml4_slot vspace vaddr;
     pml4e \ liftE $ get_pml4e pm_slot;
     case pml4e of
       PDPointerTablePML4E pt' _ _ \
         if pt' = addrFromPPtr pdpt then returnOk () else throwError InvalidRoot
     | _ \ throwError InvalidRoot;
     liftE $ do
       flush_pdpt vspace asid;
       store_pml4e pm_slot InvalidPML4E
     od
   odE (K $ return ())"

text \Unmap a Page Directory from a Page Directory Pointer Table.\

definition unmap_pd :: "asid \ vspace_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "unmap_pd asid vaddr pd \ doE
     vspace \ find_vspace_for_asid asid;
     pdpt_slot \ lookup_pdpt_slot vspace vaddr;
     pdpte \ liftE $ get_pdpte pdpt_slot;
     case pdpte of
       PageDirectoryPDPTE pd' _ _ \
         if pd' = addrFromPPtr pd then returnOk () else throwError InvalidRoot
     | _ \ throwError InvalidRoot;
     liftE $ do
       flush_pd vspace asid;
       store_pdpte pdpt_slot InvalidPDPTE;
       invalidate_page_structure_cache_asid (addrFromPPtr vspace) asid
     od
   odE (K $ return ())"

text \Unmap a page table from its page directory.\

definition unmap_page_table :: "asid \ vspace_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "unmap_page_table asid vaddr pt \ doE
     vspace \ find_vspace_for_asid asid;
     pd_slot \ lookup_pd_slot vspace vaddr;
     pde \ liftE $ get_pde pd_slot;
     case pde of
       PageTablePDE addr _ _ \
         if addrFromPPtr pt = addr then returnOk () else throwError InvalidRoot
     | _ \ throwError InvalidRoot;
     liftE $ do
       flush_table vspace vaddr pt asid;
       store_pde pd_slot InvalidPDE;
       invalidate_page_structure_cache_asid (addrFromPPtr vspace) asid
     od
   odE (K $ return ())"

text \Check that a given frame is mapped by a given mapping entry.\

(* True iff the entry is a frame mapping whose base is the physical address of
   pptr; invalid and structure entries never match. *)
definition check_mapping_pptr :: "machine_word \ vm_page_entry \ bool" where
  "check_mapping_pptr pptr entry \
     case entry of
       VMPTE (SmallPagePTE base _ _) \ base = addrFromPPtr pptr
     | VMPDE (LargePagePDE base _ _) \ base = addrFromPPtr pptr
     | VMPDPTE (HugePagePDPTE base _ _) \ base = addrFromPPtr pptr
     | _ \ False"

text \Unmap a mapped page if the given mapping details are still current.\

(* Per-size lookup of the slot at the matching paging level; the entry is only
   cleared when it still maps this frame, then the single virtual address is
   invalidated. Failures are swallowed by the trailing handler. *)
definition unmap_page :: "vmpage_size \ asid \ vspace_ref \ obj_ref \ (unit,'z::state_ext) s_monad" where
  "unmap_page pgsz asid vptr pptr \ doE
     vspace \ find_vspace_for_asid asid;
     case pgsz of
       X64SmallPage \ doE
         pt_slot \ lookup_pt_slot vspace vptr;
         pte \ liftE $ get_pte pt_slot;
         unlessE (check_mapping_pptr pptr (VMPTE pte)) $ throwError InvalidRoot;
         liftE $ store_pte pt_slot InvalidPTE
       odE
     | X64LargePage \ doE
         pd_slot \ lookup_pd_slot vspace vptr;
         pde \ liftE $ get_pde pd_slot;
         unlessE (check_mapping_pptr pptr (VMPDE pde)) $ throwError InvalidRoot;
         liftE $ store_pde pd_slot InvalidPDE
       odE
     | X64HugePage \ doE
         pdpt_slot \ lookup_pdpt_slot vspace vptr;
         pdpte \ liftE $ get_pdpte pdpt_slot;
         unlessE (check_mapping_pptr pptr (VMPDPTE pdpte)) $ throwError InvalidRoot;
         liftE $ store_pdpte pdpt_slot InvalidPDPTE
       odE;
     liftE $ do_machine_op $ invalidateTranslationSingleASID vptr (ucast asid)
   odE (K $ return ())"

text \Page table structure capabilities cannot be copied until they have a
virtual ASID and location assigned. This is because they cannot have multiple
current virtual ASIDs and cannot be shared between address spaces or virtual
locations.\

(* Derived PageCaps lose their mapping data (VMNoMap/None); unmapped paging
   structure caps cannot be derived; IOPortControlCap derives to NullCap. *)
definition arch_derive_cap :: "arch_cap \ (cap,'z::state_ext) se_monad" where
  "arch_derive_cap c \ case c of
     PageTableCap _ (Some x) \ returnOk (ArchObjectCap c)
   | PageTableCap _ None \ throwError IllegalOperation
   | PageDirectoryCap _ (Some x) \ returnOk (ArchObjectCap c)
   | PageDirectoryCap _ None \ throwError IllegalOperation
   | PDPointerTableCap _ (Some x) \ returnOk (ArchObjectCap c)
   | PDPointerTableCap _ None \ throwError IllegalOperation
   | PML4Cap _ (Some x) \ returnOk (ArchObjectCap c)
   | PML4Cap _ None \ throwError IllegalOperation
   | PageCap dev r R mt pgs x \ returnOk $ ArchObjectCap (PageCap dev r R VMNoMap pgs None)
   | ASIDControlCap \ returnOk (ArchObjectCap c)
   | ASIDPoolCap _ _ \ returnOk (ArchObjectCap c)
   \<^cancel>\FIXME x64-vtd: | IOSpaceCap _ _ \ returnOk c | IOPageTableCap _ _ _ \ returnOk c\
   | IOPortCap _ _ \ returnOk (ArchObjectCap c)
   | IOPortControlCap \ returnOk NullCap"

text \No user-modifiable data is stored in x64-specific capabilities.\

definition arch_update_cap_data :: "bool \ data \ arch_cap \ cap" where
  "arch_update_cap_data preserve data c \ ArchObjectCap c"

text \Actions that must be taken on finalisation of x64-specific capabilities.\

(* The boolean flag distinguishes final (True) from non-final deletion; page
   unmapping happens regardless of finality. Finalising an IOPortCap returns
   the cap itself in the second component (for port deallocation). *)
definition arch_finalise_cap :: "arch_cap \ bool \ (cap \ cap,'z::state_ext) s_monad" where
  "arch_finalise_cap c x \ case (c, x) of
     (ASIDPoolCap ptr b, True) \ do delete_asid_pool b ptr; return (NullCap, NullCap) od
   | (PML4Cap ptr (Some a), True) \ do delete_asid a ptr; return (NullCap, NullCap) od
   | (PDPointerTableCap ptr (Some (a,v)), True) \ do unmap_pdpt a v ptr; return (NullCap, NullCap) od
   | (PageDirectoryCap ptr (Some (a,v)), True) \ do unmap_pd a v ptr; return (NullCap, NullCap) od
   | (PageTableCap ptr (Some (a, v)), True) \ do unmap_page_table a v ptr; return (NullCap, NullCap) od
   | (PageCap _ ptr _ _ s (Some (a, v)), _) \ do unmap_page s a v ptr; return (NullCap, NullCap) od
   | (IOPortCap f l, True) \ return (NullCap, (ArchObjectCap (IOPortCap f l)))
   \ \FIXME x64-vtd: IOSpaceCap and IOPageTableCap for @{text arch_finalise_cap}\
   | _ \ return (NullCap, NullCap)"

text \A thread's virtual address space capability must be to a mapped PML4
(page map level 4) to be valid on the x64 architecture.\

definition is_valid_vtable_root :: "cap \ bool" where
  "is_valid_vtable_root c \ \r a. c = ArchObjectCap (PML4Cap r (Some a))"

(* An IPC buffer cap must be a non-device frame cap, and the buffer pointer
   must be aligned to the message alignment. *)
definition check_valid_ipc_buffer :: "vspace_ref \ cap \ (unit,'z::state_ext) se_monad" where
  "check_valid_ipc_buffer vptr c \ case c of
     (ArchObjectCap (PageCap False _ _ _ _ _)) \ doE
       whenE (\ is_aligned vptr msg_align_bits) $ throwError AlignmentError;
       returnOk ()
     odE
   | _ \ throwError IllegalOperation"

text \Decode a user argument word describing the kind of VM attributes a
mapping is to have.\

(* Bit 0: write-through; bit 1: cache disabled; bit 2: PAT. *)
definition attribs_from_word :: "machine_word \ frame_attrs" where
  "attribs_from_word w \
     let V = (if w !!0 then {PTAttr WriteThrough} else {});
         V' = (if w!!1 then insert (PTAttr CacheDisabled) V else V)
     in if w!!2 then insert PAT V' else V'"

text \Update the mapping data saved in a page or page table capability.\

(* For PageCaps the map type mt must be supplied (the mt); other paging caps
   carry only the (asid, vaddr) mapping option m. *)
definition update_map_data ::
  "arch_cap \ (asid \ vspace_ref) option \ vmmap_type option \ arch_cap"
where
  "update_map_data cap m mt \ case cap of
     PageCap dev p R _ sz _ \ PageCap dev p R (the mt) sz m
   | PageTableCap p _ \ PageTableCap p m
   | PageDirectoryCap p _ \ PageDirectoryCap p m
   | PDPointerTableCap p _ \ PDPointerTableCap p m"

text \ A pointer is inside a user frame if its top bits point to a @{text DataPage}. \

definition in_user_frame :: "obj_ref \ 'z::state_ext state \ bool" where
  "in_user_frame p s \
     \sz. kheap s (p && ~~ mask (pageBitsForSize sz)) = Some (ArchObj (DataPage False sz))"

(* If the thread being deleted currently owns the FPU, hand FPU ownership
   back to the kernel (owner 0) so its FPU state is not referenced after
   deletion. *)
definition fpu_thread_delete :: "obj_ref \ (unit, 'z::state_ext) s_monad" where
  "fpu_thread_delete thread_ptr \ do
     using_fpu \ do_machine_op (nativeThreadUsingFPU thread_ptr);
     when using_fpu $ do_machine_op (switchFpuOwner 0 0)
   od"

(* Arch hook run before a TCB is deleted; on x64 this only releases the FPU. *)
definition prepare_thread_delete :: "obj_ref \ (unit,'z::state_ext) s_monad" where
  "prepare_thread_delete thread_ptr \ fpu_thread_delete thread_ptr"

text \Make numeric value of @{const msg_align_bits} visible.\
lemmas msg_align_bits = msg_align_bits'[unfolded word_size_bits_def, simplified]

end
end