diff --git a/proof/ROOT b/proof/ROOT index 9fb5226c20..fe09d1346d 100644 --- a/proof/ROOT +++ b/proof/ROOT @@ -151,7 +151,7 @@ session Access in "access-control" = AInvs + session InfoFlow in "infoflow" = Access + directories "$L4V_ARCH" - theories + theories "InfoFlow_Image_Toplevel" session InfoFlowCBase in "infoflow/refine/base" = CRefine + diff --git a/proof/access-control/DomainSepInv.thy b/proof/access-control/DomainSepInv.thy index c9d51848e2..b3b67e4d36 100644 --- a/proof/access-control/DomainSepInv.thy +++ b/proof/access-control/DomainSepInv.thy @@ -30,6 +30,7 @@ definition domain_sep_inv :: "bool \ 'a :: state_ext state \ \ cte_wp_at ((=) (IRQHandlerCap irq)) slot s \ interrupt_states s irq \ IRQSignal \ interrupt_states s irq \ IRQReserved + \ (irq \ non_kernel_IRQs \ interrupt_states s irq = IRQInactive) \ interrupt_states s = interrupt_states st))" definition domain_sep_inv_cap where @@ -59,6 +60,7 @@ lemma domain_sep_inv_def2: \ \ cte_wp_at ((=) (IRQHandlerCap irq)) slot s)) \ (irqs \ (\irq. interrupt_states s irq \ IRQSignal \ interrupt_states s irq \ IRQReserved + \ (irq \ non_kernel_IRQs \ interrupt_states s irq = IRQInactive) \ interrupt_states s = interrupt_states st)))" by (fastforce simp: domain_sep_inv_def) @@ -90,7 +92,9 @@ lemma domain_sep_inv_wp: apply (rule disjI2) apply simp apply (intro allI conjI) - apply (erule_tac P1="\x. x irq \ IRQSignal" in use_valid[OF _ irq_pres], assumption) + apply (erule_tac P1="\x. 
x irq \ IRQSignal" in use_valid[OF _ irq_pres], assumption) + apply blast + apply (erule use_valid[OF _ irq_pres], assumption) apply blast apply (erule use_valid[OF _ irq_pres], assumption) apply blast diff --git a/proof/infoflow/AARCH64/ArchADT_IF.thy b/proof/infoflow/AARCH64/ArchADT_IF.thy new file mode 100644 index 0000000000..9f56ed5962 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchADT_IF.thy @@ -0,0 +1,352 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +text \ + This file sets up a kernel automaton, ADT_A_if, which is + slightly different from ADT_A. + It then setups a big step framework to transfrom this automaton in the + big step automaton on which the infoflow theorem will be proved +\ + +theory ArchADT_IF +imports ADT_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems ADT_IF_assms + +(* FIXME: clagged from AInvs.do_user_op_invs *) +lemma do_user_op_if_invs[ADT_IF_assms]: + "\invs and ct_running\ + do_user_op_if f tc + \\_. invs and ct_running\" + apply (simp add: do_user_op_if_def split_def) + apply (wp do_machine_op_ct_in_state device_update_invs | wp (once) dmo_invs | simp)+ + apply (clarsimp simp: user_mem_def user_memory_update_def simpler_modify_def restrict_map_def + invs_def cur_tcb_def ptable_rights_s_def ptable_lift_s_def) + apply (frule ptable_rights_imp_frame) + apply fastforce + apply simp + apply (clarsimp simp: valid_state_def device_frame_in_device_region) + done + +crunch do_user_op_if + for domain_sep_inv[ADT_IF_assms, wp]: "domain_sep_inv irqs st" + (ignore: user_memory_update) + +crunch do_user_op_if + for valid_sched[ADT_IF_assms, wp]: "valid_sched" + (ignore: user_memory_update) + +crunch do_user_op_if + for irq_masks[ADT_IF_assms, wp]: "\s. 
P (irq_masks_of_state s)" + (ignore: user_memory_update wp: dmo_wp no_irq) + +crunch do_user_op_if + for valid_list[ADT_IF_assms, wp]: "valid_list" + (ignore: user_memory_update) + +lemma do_user_op_if_scheduler_action[ADT_IF_assms, wp]: + "do_user_op_if f tc \\s. P (scheduler_action s)\" + by (simp add: do_user_op_if_def | wp | wpc)+ + +lemma do_user_op_silc_inv[ADT_IF_assms, wp]: + "do_user_op_if f tc \silc_inv aag st\" + apply (simp add: do_user_op_if_def) + apply (wp | wpc | simp)+ + done + +lemma do_user_op_pas_refined[ADT_IF_assms, wp]: + "do_user_op_if f tc \pas_refined aag\" + apply (simp add: do_user_op_if_def) + apply (wp | wpc | simp)+ + done + +crunch do_user_op_if + for cur_thread[ADT_IF_assms, wp]: "\s. P (cur_thread s)" + and cur_domain[ADT_IF_assms, wp]: "\s. P (cur_domain s)" + and idle_thread[ADT_IF_assms, wp]: "\s. P (idle_thread s)" + and domain_fields[ADT_IF_assms, wp]: "domain_fields P" + (ignore: user_memory_update) + +lemma do_use_op_guarded_pas_domain[ADT_IF_assms, wp]: + "do_user_op_if f tc \guarded_pas_domain aag\" + by (rule guarded_pas_domain_lift; wp) + +lemma tcb_arch_ref_tcb_context_set[ADT_IF_assms, simp]: + "tcb_arch_ref (tcb_arch_update (arch_tcb_context_set tc) tcb) = tcb_arch_ref tcb" + by (simp add: tcb_arch_ref_def arch_tcb_context_set_def) + +crunch arch_activate_idle_thread, arch_switch_to_thread + for cur_thread[ADT_IF_assms, wp]: "\s. P (cur_thread s)" + +lemma arch_activate_idle_thread_scheduler_action[ADT_IF_assms, wp]: + "arch_activate_idle_thread t \\s :: det_state. 
P (scheduler_action s)\" + by (wpsimp simp: arch_activate_idle_thread_def) + +crunch handle_vm_fault, handle_hypervisor_fault + for domain_fields[ADT_IF_assms, wp]: "domain_fields P" + +lemma arch_perform_invocation_noErr[ADT_IF_assms, wp]: + "\\\ arch_perform_invocation a -, \Q\" + by (wpsimp simp: arch_perform_invocation_def) + +lemma arch_invoke_irq_control_noErr[ADT_IF_assms, wp]: + "\\\ arch_invoke_irq_control a -, \Q\" + by (cases a; wpsimp) + +lemma getActiveIRQ_None[ADT_IF_assms]: + "(None,s') \ fst (do_machine_op (getActiveIRQ in_kernel) s) \ + irq_at (irq_state (machine_state s) + 1) (irq_masks (machine_state s)) = None" + apply (erule use_valid) + apply (wp dmo_getActiveIRQ_wp) + by simp + +lemma getActiveIRQ_Some[ADT_IF_assms]: + "(Some i, s') \ fst (do_machine_op (getActiveIRQ in_kernel) s) + \ irq_at (irq_state (machine_state s) + 1) (irq_masks (machine_state s)) = Some i" + apply (erule use_valid) + apply (wp dmo_getActiveIRQ_wp) + by simp + +lemma idle_equiv_as_globals_equiv: + "arm_us_global_vspace (arch_state s) \ idle_thread s + \ idle_equiv st s = + globals_equiv (st\arch_state := arch_state s, machine_state := machine_state s, + kheap:= (kheap st)(arm_us_global_vspace (arch_state s) := + kheap s (arm_us_global_vspace (arch_state s))), + cur_thread := cur_thread s\) s" + by (clarsimp simp: idle_equiv_def globals_equiv_def tcb_at_def2) + +lemma idle_globals_lift: + assumes g: "\st. \globals_equiv st and P\ f \\_. globals_equiv st\" + assumes i: "\s. P s \ arm_us_global_vspace (arch_state s) \ idle_thread s" + shows "\idle_equiv st and P\ f \\_. 
idle_equiv st\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "arm_us_global_vspace (arch_state s) \ idle_thread s") + apply (subst (asm) idle_equiv_as_globals_equiv,simp+) + apply (frule use_valid[OF _ g]) + apply simp+ + apply (clarsimp simp: idle_equiv_def globals_equiv_def tcb_at_def2) + apply (erule i) + done + +lemma idle_equiv_as_globals_equiv_scheduler: + "arm_us_global_vspace (arch_state s) \ idle_thread s + \ idle_equiv st s = + globals_equiv_scheduler (st\arch_state := arch_state s, machine_state := machine_state s, + kheap:= (kheap st)(arm_us_global_vspace (arch_state s) := + kheap s (arm_us_global_vspace (arch_state s)))\) s" + by (clarsimp simp: idle_equiv_def tcb_at_def2 globals_equiv_scheduler_def + arch_globals_equiv_scheduler_def) + +lemma idle_globals_lift_scheduler: + assumes g: "\st. \globals_equiv_scheduler st and P\ f \\_. globals_equiv_scheduler st\" + assumes i: "\s. P s \ arm_us_global_vspace (arch_state s) \ idle_thread s" + shows "\idle_equiv st and P\ f \\_. idle_equiv st\" + apply (clarsimp simp: valid_def) + apply (subgoal_tac "arm_us_global_vspace (arch_state s) \ idle_thread s") + apply (subst (asm) idle_equiv_as_globals_equiv_scheduler,simp+) + apply (frule use_valid[OF _ g]) + apply simp+ + apply (clarsimp simp: idle_equiv_def globals_equiv_scheduler_def tcb_at_def2) + apply (erule i) + done + +lemma invs_pt_not_idle_thread[intro]: + "invs s \ arm_us_global_vspace (arch_state s) \ idle_thread s" + by (fastforce dest: valid_global_arch_objs_pt_at + simp: invs_def valid_state_def valid_arch_state_def valid_global_objs_def + obj_at_def valid_idle_def pred_tcb_at_def empty_table_def) + +lemma kernel_entry_if_idle_equiv[ADT_IF_assms]: + "\invs and (\s. e \ Interrupt \ ct_active s) and idle_equiv st + and (\s. ct_idle s \ tc = idle_context s)\ + kernel_entry_if e tc + \\_. 
idle_equiv st\" + apply (rule hoare_pre) + apply (rule idle_globals_lift) + apply (wp kernel_entry_if_globals_equiv) + apply force + apply (fastforce intro!: invs_pt_not_idle_thread)+ + done + +lemmas handle_preemption_idle_equiv[ADT_IF_assms, wp] = + idle_globals_lift[OF handle_preemption_globals_equiv invs_pt_not_idle_thread, simplified] + +lemmas schedule_if_idle_equiv[ADT_IF_assms, wp] = + idle_globals_lift_scheduler[OF schedule_if_globals_equiv_scheduler invs_pt_not_idle_thread, simplified] + +lemma do_user_op_if_idle_equiv[ADT_IF_assms, wp]: + "\idle_equiv st and invs\ + do_user_op_if uop tc + \\_. idle_equiv st\" + unfolding do_user_op_if_def + by (wpsimp wp: dmo_user_memory_update_idle_equiv dmo_device_memory_update_idle_equiv) + +lemma kernel_entry_if_valid_vspace_objs_if[ADT_IF_assms, wp]: + "\valid_vspace_objs_if and invs and (\s. e \ Interrupt \ ct_active s)\ + kernel_entry_if e tc + \\_. valid_vspace_objs_if\" + by wpsimp + +lemma handle_preemption_if_valid_pdpt_objs[ADT_IF_assms, wp]: + "\valid_vspace_objs_if\ handle_preemption_if a \\rv s. valid_vspace_objs_if s\" + by wpsimp + +lemma schedule_if_valid_pdpt_objs[ADT_IF_assms, wp]: + "\valid_vspace_objs_if\ schedule_if a \\rv s. valid_vspace_objs_if s\" + by wpsimp + +lemma do_user_op_if_valid_pdpt_objs[ADT_IF_assms, wp]: + "\valid_vspace_objs_if\ do_user_op_if a b \\rv s. valid_vspace_objs_if s\" + by wpsimp + +lemma valid_vspace_objs_if_ms_update[ADT_IF_assms, simp]: + "valid_vspace_objs_if (machine_state_update f s) = valid_vspace_objs_if s" + by simp + +lemma do_user_op_if_irq_state_of_state[ADT_IF_assms]: + "do_user_op_if utf uc \\s. P (irq_state_of_state s)\" + apply (rule hoare_pre) + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ + done + +lemma do_user_op_if_irq_masks_of_state[ADT_IF_assms]: + "do_user_op_if utf uc \\s. 
P (irq_masks_of_state s)\" + apply (rule hoare_pre) + apply (simp add: do_user_op_if_def user_memory_update_def | wp dmo_wp | wpc)+ + done + +lemma do_user_op_if_irq_measure_if[ADT_IF_assms]: + "do_user_op_if utf uc \\s. P (irq_measure_if s)\" + apply (rule hoare_pre) + apply (simp add: do_user_op_if_def user_memory_update_def irq_measure_if_def + | wps |wp dmo_wp | wpc)+ + done + +crunch set_flags, arch_post_set_flags + for irq_states_of_state[wp]: "\s. P (irq_state_of_state s)" + +lemma invoke_tcb_irq_state_inv[ADT_IF_assms]: + "\(\s. irq_state_inv st s) and domain_sep_inv False sta + and tcb_inv_wf tinv and K (irq_is_recurring irq st)\ + invoke_tcb tinv + \\_ s. irq_state_inv st s\, \\_. irq_state_next st\" + apply (case_tac tinv) + apply ((wp hoare_vcg_if_lift mapM_x_wp[OF _ subset_refl] + | wpc + | simp split del: if_split add: check_cap_at_def + | clarsimp + | wp (once) irq_state_inv_triv)+)[3] + defer + apply ((wp irq_state_inv_triv | simp)+)[2] + apply (simp add: split_def cong: option.case_cong) + by (clarsimp split del: if_split cong: conj_cong + | wp hoare_vcg_all_liftE_R hoare_vcg_all_lift hoare_vcg_const_imp_liftE_R + checked_cap_insert_domain_sep_inv cap_delete_deletes + cap_delete_irq_state_inv[where st=st and sta=sta and irq=irq] + cap_delete_irq_state_next[where st=st and sta=sta and irq=irq] + cap_delete_valid_cap cap_delete_cte_at + | wpc + | simp add: emptyable_def tcb_cap_cases_def tcb_cap_valid_def + tcb_at_st_tcb_at option_update_thread_def + | strengthen use_no_cap_to_obj_asid_strg + | wp (once) irq_state_inv_triv hoare_drop_imps + | clarsimp split: option.splits | intro impI conjI allI)+ + +lemma reset_untyped_cap_irq_state_inv[ADT_IF_assms]: + "\irq_state_inv st and K (irq_is_recurring irq st)\ + reset_untyped_cap slot + \\y. irq_state_inv st\, \\y. 
irq_state_next st\" + apply (cases "irq_is_recurring irq st", simp_all) + apply (simp add: reset_untyped_cap_def) + apply (rule hoare_pre) + apply (wp no_irq_clearMemory mapME_x_wp' hoare_vcg_const_imp_lift + get_cap_wp preemption_point_irq_state_inv'[where irq=irq] + | rule irq_state_inv_triv + | simp add: unless_def + | wp (once) dmo_wp)+ + done + +lemma handle_vm_fault_irq_state_of_state[ADT_IF_assms]: + "handle_vm_fault thread fault \\s. P (irq_state_of_state s)\" + unfolding handle_vm_fault_def addressTranslateS1_def + by (wpsimp wp: dmo_wp) + +lemma handle_hypervisor_fault_irq_state_of_state[ADT_IF_assms]: + "handle_hypervisor_fault thread fault \\s. P (irq_state_of_state s)\" + by (cases fault, wpsimp wp: dmo_wp split_del: if_split) + + +text \Not true of invoke_untyped any more.\ +crunch create_cap + for irq_state_of_state[ADT_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (ignore: freeMemory + wp: dmo_wp modify_wp crunch_wps + simp: freeMemory_def storeWord_def clearMemory_def + machine_op_lift_def machine_rest_lift_def mapM_x_defsym) + +crunch arch_invoke_irq_control + for irq_state_of_state[ADT_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (wp: dmo_wp crunch_wps simp: setIRQTrigger_def machine_op_lift_def machine_rest_lift_def) + +lemma handle_reserved_irq_non_kernel_IRQs[ADT_IF_assms]: + "\P and K (irq \ non_kernel_IRQs)\ handle_reserved_irq irq \\_. P\" + unfolding handle_reserved_irq_def + apply (rule hoare_gen_asm) + apply (wpsimp wp: when_wp[where P'="\"] simp: non_kernel_IRQs_def irq_vppi_event_index_def) + done + +lemma thread_set_context_state_hyp_refs_of[CNode_AC_assms]: + "thread_set (tcb_arch_update (arch_tcb_context_set ctxt)) t \\s. 
P (state_hyp_refs_of s)\" + apply (wpsimp simp: thread_set_def wp: set_object_wp ) + apply (erule_tac P=P in back_subst) + apply (rule ext) + apply (simp add: state_hyp_refs_of_def get_tcb_def arch_tcb_context_set_def + split: option.splits kernel_object.splits) + done + +(* FIXME AARCH64 IF: make generic *) +lemma thread_set_context_pas_refined[ADT_IF_assms]: + "thread_set (tcb_arch_update (arch_tcb_context_set ctxt)) t \pas_refined aag\" + unfolding pas_refined_def state_objs_to_policy_def + apply (rule hoare_weaken_pre) + apply (wpsimp wp: tcb_domain_map_wellformed_lift_strong thread_set_edomains) + apply (wps thread_set_state_vrefs thread_set_context_state_hyp_refs_of) + apply (rule hoare_lift_Pf2[where f="caps_of_state"]) + apply (rule hoare_lift_Pf2[where f="thread_st_auth"]) + apply (rule hoare_lift_Pf2[where f="thread_bound_ntfns"]) + apply wp + apply (wpsimp wp: thread_set_thread_bound_ntfns_trivT ) + apply (wpsimp wp: thread_set_thread_st_auth_trivT) + apply (wpsimp wp: thread_set_caps_of_state_trivial simp: ran_tcb_cap_cases) + apply simp + done + +crunch init_arch_objects + for irq_states_of_state[ADT_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (wp: crunch_wps dmo_wp) + +end + + +global_interpretation ADT_IF_1?: ADT_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact ADT_IF_assms | wp )?) +qed + +sublocale valid_initial_state \ valid_initial_state?: ADT_valid_initial_state .. 
+ + +hide_fact ADT_IF_1.do_user_op_silc_inv +requalify_facts AARCH64.do_user_op_silc_inv +declare do_user_op_silc_inv[wp] + +end diff --git a/proof/infoflow/AARCH64/ArchArch_IF.thy b/proof/infoflow/AARCH64/ArchArch_IF.thy new file mode 100644 index 0000000000..4ef6782085 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchArch_IF.thy @@ -0,0 +1,1433 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchArch_IF +imports Arch_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Arch_IF_assms + +(* we need to know we're not doing an asid pool update, or else this could affect + what some other domain sees *) +lemma set_object_equiv_but_for_labels: + "\equiv_but_for_labels aag L st and (\ s. \ asid_pool_at ptr s) and + K ((\asid_pool. obj \ ArchObj (ASIDPool asid_pool)) \ pasObjectAbs aag ptr \ L)\ + set_object ptr obj + \\_. equiv_but_for_labels aag L st\" + apply (wpsimp wp: set_object_wp) + apply (clarsimp simp: equiv_but_for_labels_def) + apply (subst dummy_kheap_update[where st=st]) + apply (rule states_equiv_for_non_asid_pool_kheap_update) + apply assumption + apply (fastforce intro: equiv_forI elim: states_equiv_forE equiv_forE) + apply (fastforce simp: non_asid_pool_kheap_update_def) + apply (clarsimp simp: non_asid_pool_kheap_update_def asid_pool_at_kheap) + done + +lemma get_tcb_not_asid_pool_at: + "get_tcb ref s = Some y \ \ asid_pool_at ref s" + by (fastforce simp: get_tcb_def asid_pool_at_kheap) + +lemma as_user_set_register_ev2: + assumes domains_distinct: "pas_domains_distinct aag" + shows "labels_are_invisible aag l (pasObjectAbs aag ` {thread,thread'}) + \ equiv_valid_2 (reads_equiv aag) (affects_equiv aag l) (affects_equiv aag l) (=) \ \ + (as_user thread (setRegister x y)) (as_user thread' (setRegister a b))" + apply (simp add: as_user_def) + apply (rule equiv_valid_2_guard_imp) + apply (rule_tac L="{pasObjectAbs aag thread}" and L'="{pasObjectAbs aag thread'}" + 
and Q="\" and Q'="\" in ev2_invisible[OF domains_distinct]) + apply (simp add: labels_are_invisible_def)+ + apply ((rule modifies_at_mostI + | wp set_object_equiv_but_for_labels + | simp add: split_def + | fastforce dest: get_tcb_not_asid_pool_at)+)[2] + apply auto + done + +crunch arch_post_cap_deletion + for valid_global_refs[Arch_IF_assms, wp]: "valid_global_refs" + +crunch store_word_offs + for irq_state_of_state[Arch_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (wp: crunch_wps dmo_wp simp: storeWord_def) + +crunch set_irq_state, arch_post_cap_deletion, handle_arch_fault_reply + for irq_state_of_state[Arch_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (wp: crunch_wps dmo_wp simp: crunch_simps maskInterrupt_def) + +crunch readVCPUHardwareReg, read_cntpct, check_export_arch_timer, writeVCPUHardwareReg, + maskInterrupt, enableFpuEL01, isb, dsb, setHCR, setSCTLR, + set_gic_vcpu_ctrl_hcr, set_gic_vcpu_ctrl_lr, set_gic_vcpu_ctrl_apr, set_gic_vcpu_ctrl_vmcr, + get_gic_vcpu_ctrl_hcr, get_gic_vcpu_ctrl_lr, get_gic_vcpu_ctrl_apr, get_gic_vcpu_ctrl_vmcr, + invalidateTranslationASID, writeFpuState, disableFpu, enableFpu, deactivateInterrupt, do_flush, + sendSGI + for irq_state[wp]: "\ms. P (irq_state ms)" + +crunch arch_switch_to_idle_thread, arch_switch_to_thread + for irq_state_of_state[Arch_IF_assms, wp]: "\s :: det_state. P (irq_state_of_state s)" + (wp: dmo_wp modify_wp crunch_wps whenE_wp + simp: machine_op_lift_def setVSpaceRoot_def + machine_rest_lift_def crunch_simps storeWord_def) + +crunch arch_invoke_irq_handler + for irq_state_of_state[Arch_IF_assms, wp]: "\s. P (irq_state_of_state s)" + (wp: dmo_wp simp: maskInterrupt_def plic_complete_claim_def) + +crunch arch_perform_invocation + for irq_state_of_state[wp]: "\s. 
P (irq_state_of_state s)" + (wp: dmo_wp modify_wp simp: cache_machine_op_defs + wp: crunch_wps simp: crunch_simps ignore: ignore_failure) + +crunch arch_finalise_cap, prepare_thread_delete + for irq_state_of_state[Arch_IF_assms, wp]: "\s :: det_state. P (irq_state_of_state s)" + (wp: modify_wp crunch_wps dmo_wp + simp: crunch_simps) + +lemma equiv_asid_machine_state_update[Arch_IF_assms, simp]: + "equiv_asid asid (machine_state_update f s) s' = equiv_asid asid s s'" + "equiv_asid asid s (machine_state_update f s') = equiv_asid asid s s'" + by (auto simp: equiv_asid_def) + +lemma as_user_set_register_reads_respects'[Arch_IF_assms]: + assumes domains_distinct: "pas_domains_distinct aag" + shows "reads_respects aag l \ (as_user thread (setRegister x y))" + apply (case_tac "aag_can_read aag thread \ aag_can_affect aag l thread") + apply (simp add: as_user_def split_def) + apply (rule gen_asm_ev) + apply (wp set_object_reads_respects select_f_ev gets_the_ev) + apply (auto intro: reads_affects_equiv_get_tcb_eq det_setRegister)[1] + apply (simp add: equiv_valid_def2) + apply (rule as_user_set_register_ev2[OF domains_distinct]) + apply (simp add: labels_are_invisible_def) + done + +lemma store_word_offs_reads_respects[Arch_IF_assms]: + "reads_respects aag l \ (store_word_offs ptr offs v)" + apply (simp add: store_word_offs_def) + apply (rule equiv_valid_get_assert) + apply (simp add: storeWord_def) + apply (simp add: do_machine_op_bind) + apply wp + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects) + apply (clarsimp simp: equiv_valid_def2 equiv_valid_2_def in_monad) + apply (fastforce intro: equiv_forI elim: equiv_forE simp: upto.simps comp_def) + apply (rule use_spec_ev do_machine_op_spec_reads_respects assert_ev2 + | simp add: spec_equiv_valid_def | wp modify_wp)+ + done + +lemma set_simple_ko_globals_equiv[Arch_IF_assms]: + "\globals_equiv s and valid_arch_state\ + set_simple_ko f ptr ep + \\_. 
globals_equiv s\" + unfolding set_simple_ko_def + apply (wpsimp wp: set_object_globals_equiv[THEN hoare_set_object_weaken_pre] get_object_wp + simp: partial_inv_def)+ + apply (fastforce simp: obj_at_def valid_arch_state_def dest: valid_global_arch_objs_pt_at) + done + +crunch set_thread_state_act + for globals_equiv[wp]: "globals_equiv s" + +lemma set_thread_state_globals_equiv[Arch_IF_assms]: + "\globals_equiv s and valid_arch_state\ + set_thread_state ref ts + \\_. globals_equiv s\" + unfolding set_thread_state_def + apply (wp set_object_globals_equiv |simp)+ + apply (intro impI conjI allI) + apply (fastforce simp: valid_arch_state_def obj_at_def tcb_at_def2 get_tcb_def is_tcb_def + dest: get_tcb_SomeD valid_global_arch_objs_pt_at + split: option.splits kernel_object.splits)+ + done + +lemma set_cap_globals_equiv''[Arch_IF_assms]: + "\globals_equiv s and valid_arch_state\ + set_cap cap p + \\_. globals_equiv s\" + unfolding set_cap_def + apply (simp only: split_def) + apply (wp set_object_globals_equiv hoare_vcg_all_lift get_object_wp | wpc | simp)+ + apply (fastforce simp: valid_arch_state_def obj_at_def is_tcb_def + dest: valid_global_arch_objs_pt_at)+ + done + +(* FIXME AARCH64 IF: consolidate definitions *) +lemma set_cap_globals_equiv''': + "\globals_equiv s and valid_global_arch_objs\ + set_cap cap p + \\_. globals_equiv s\" + unfolding set_cap_def + apply (simp only: split_def) + apply (wp set_object_globals_equiv hoare_vcg_all_lift get_object_wp | wpc | simp)+ + apply (fastforce simp: valid_arch_state_def obj_at_def is_tcb_def + dest: valid_global_arch_objs_pt_at)+ + done + +lemma as_user_globals_equiv[Arch_IF_assms]: + "\globals_equiv s and valid_arch_state and (\s. tptr \ idle_thread s)\ + as_user tptr f + \\_. 
globals_equiv s\" + unfolding as_user_def + apply (wpsimp wp: set_object_globals_equiv simp: split_def) + apply (fastforce simp: valid_arch_state_def get_tcb_def obj_at_def + dest: valid_global_arch_objs_pt_at) + done + +crunch arch_prepare_set_domain, arch_prepare_next_domain + for irq_state_of_state[Arch_IF_assms, wp]: "\s. P (irq_state_of_state s)" + +end + + +requalify_facts + AARCH64.set_simple_ko_globals_equiv + AARCH64.retype_region_irq_state_of_state + AARCH64.arch_perform_invocation_irq_state_of_state + +declare + retype_region_irq_state_of_state[wp] + arch_perform_invocation_irq_state_of_state[wp] + + +global_interpretation Arch_IF_1?: Arch_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Arch_IF_assms)?) +qed + + +lemmas invs_imps = + invs_sym_refs invs_psp_aligned invs_distinct invs_arch_state + invs_valid_global_objs invs_arch_state invs_valid_objs invs_valid_global_refs tcb_at_invs + invs_cur invs_kernel_mappings + + +context Arch begin global_naming AARCH64 + +lemma get_asid_pool_revrv': + "reads_equiv_valid_rv_inv (affects_equiv aag l) aag + (\rv rv'. aag_can_read aag ptr \ rv = rv') \ (get_asid_pool ptr)" + unfolding gets_map_def + apply (subst gets_apply) + apply (subst gets_apply) + apply (rule_tac W="\rv rv'. aag_can_read aag ptr \ rv = rv'" in equiv_valid_rv_bind) + apply (fastforce elim: reads_equivE equiv_forE + simp: equiv_valid_2_def opt_map_def gets_apply_def get_def bind_def return_def) + apply (fastforce simp: equiv_valid_2_def return_def assert_opt_def fail_def split: option.splits) + apply wp + done + +lemma get_asid_pool_rev: + "reads_equiv_valid_inv A aag (K (is_subject aag ptr)) (get_asid_pool ptr)" + unfolding gets_map_def + apply (subst gets_apply) + apply (wpsimp wp: gets_apply_ev) + apply (fastforce elim: reads_equivE equiv_forE simp: opt_map_def) + done + +lemma get_asid_pool_revrv: + "reads_equiv_valid_rv_inv (affects_equiv aag l) aag + (\rv rv'. 
rv (asid_low_bits_of asid) = rv' (asid_low_bits_of asid)) + (\s. Some a = arm_asid_table (arch_state s) (asid_high_bits_of asid) \ + is_subject_asid aag asid \ asid \ 0) + (get_asid_pool a)" + unfolding gets_map_def assert_opt_def2 + apply (rule equiv_valid_rv_guard_imp) + apply (rule_tac R'="\rv rv'. \p p'. rv a = Some p \ rv' a = Some p' + \ p (asid_low_bits_of asid) = p' (asid_low_bits_of asid)" + and P="\s. Some a = arm_asid_table (arch_state s) (asid_high_bits_of asid) \ + is_subject_asid aag asid \ asid \ 0" + and P'="\s. Some a = arm_asid_table (arch_state s) (asid_high_bits_of asid) \ + is_subject_asid aag asid \ asid \ 0" + in equiv_valid_2_bind) + apply (clarsimp simp: equiv_valid_2_def assert_def bind_def return_def fail_def + split: if_split) + apply (clarsimp simp: equiv_valid_2_def gets_def get_def bind_def return_def fail_def + split: if_split) + apply (drule_tac s="Some a" in sym) + apply (fastforce elim: reads_equivE simp: equiv_asids_def equiv_asid_def) + apply (wp wp_post_taut | simp)+ + done + +lemma asid_high_bits_0_eq_1: + "asid_high_bits_of 0 = asid_high_bits_of 1" + by (auto simp: asid_high_bits_of_def asid_low_bits_def) + +lemma requiv_arm_asid_table_asid_high_bits_of_asid_eq: + "\ is_subject_asid aag asid; reads_equiv aag s t; asid \ 0 \ + \ arm_asid_table (arch_state s) (asid_high_bits_of asid) = + arm_asid_table (arch_state t) (asid_high_bits_of asid)" + apply (erule reads_equivE) + apply (fastforce simp: equiv_asids_def equiv_asid_def intro: aag_can_read_own_asids) + done + +lemma load_vmid_reads_respects: + "reads_respects aag l (K (asid \ 0 \ aag_can_read_asid aag asid)) (load_vmid asid)" + unfolding load_vmid_def + apply wpsimp + apply (erule reads_equivE) + apply (clarsimp simp: equiv_asids_def) + apply (erule equiv_forE) + apply (erule_tac x=asid in allE) + apply clarsimp + apply (fastforce simp: vspace_for_asid_def entry_for_asid_def entry_for_pool_def + pool_for_asid_def vspace_for_pool_def + opt_map_def obind_def obj_at_def 
equiv_asid_def + split: option.splits) + done + +lemma find_vspace_for_asid_reads_respects: + "reads_respects aag l (K (asid \ 0 \ aag_can_read_asid aag asid)) (find_vspace_for_asid asid)" + unfolding find_vspace_for_asid_def + apply wpsimp + apply (simp add: throw_opt_def) + apply wpsimp + apply wpsimp+ + apply (erule reads_equivE) + apply (clarsimp simp: equiv_asids_def) + apply (erule equiv_forE) + apply (erule_tac x=asid in allE) + apply clarsimp + apply (fastforce simp: vspace_for_asid_def entry_for_asid_def entry_for_pool_def + pool_for_asid_def vspace_for_pool_def + opt_map_def obind_def obj_at_def equiv_asid_def + split: option.splits) + done + +lemma invalidate_tlb_by_asid_reads_respects: + "reads_respects aag l (K (asid \ 0 \ aag_can_read_asid aag asid)) (invalidate_tlb_by_asid asid)" + unfolding invalidate_tlb_by_asid_def invalidateTranslationASID_def + by (wpsimp wp: dmo_mol_reads_respects load_vmid_reads_respects) + +lemma invalidate_tlb_by_asid_va_reads_respects: + "reads_respects aag l (K (asid \ 0 \ aag_can_read_asid aag asid)) (invalidate_tlb_by_asid_va asid vaddr)" + unfolding invalidate_tlb_by_asid_va_def invalidateTranslationSingle_def + by (wpsimp wp: dmo_mol_reads_respects load_vmid_reads_respects) + +lemma ptes_of_reads_equiv: + "\ is_subject aag (table_base pt_t ptr); reads_equiv aag s t \ + \ ptes_of s pt_t ptr = ptes_of t pt_t ptr" + by (fastforce elim: reads_equivE equiv_forE simp: ptes_of_def obind_def opt_map_def) + +lemma pt_walk_reads_equiv: + "\ reads_equiv aag s t; pas_refined aag s; pspace_aligned s; valid_asid_table s; + valid_vspace_objs s; is_subject aag pt; vptr \ user_region; + level \ max_pt_level; vs_lookup_table level asid vptr s = Some (level, pt) \ + \ pt_walk level bot_level pt vptr (ptes_of s) = + pt_walk level bot_level pt vptr (ptes_of t)" + apply (induct level arbitrary: pt; clarsimp) + apply (simp (no_asm) add: pt_walk.simps) + apply (clarsimp simp: obind_def split: if_splits) + apply (subgoal_tac "ptes_of s 
(level_type level) (pt_slot_offset level pt vptr) = + ptes_of t (level_type level) (pt_slot_offset level pt vptr)") + apply (clarsimp split: option.splits) + apply (frule_tac bot_level="level-1" in vs_lookup_table_extend) + apply (fastforce simp: pt_walk.simps obind_def) + apply clarsimp + apply (erule_tac x="pptr_from_pte x2" in meta_allE) + apply (drule meta_mp) + apply (subst (asm) vs_lookup_split_Some[OF order_less_imp_le[OF bit1.pred]]) + apply fastforce+ + apply (erule_tac pt_ptr=pt in pt_walk_is_subject; fastforce) + apply (erule (1) meta_mp) + apply (rule ptes_of_reads_equiv) + apply (subst table_base_pt_slot_offset) + apply (erule vs_lookup_table_is_aligned) + by fastforce+ + +lemma pt_lookup_from_level_reads_respects: + "reads_respects aag l + (\s. pas_refined aag s \ pspace_aligned s \ valid_vspace_objs s \ valid_asid_table s \ + is_subject aag pt \ level \ max_pt_level \ vref \ user_region \ + (\asid. vs_lookup_table level asid vref s = Some (level, pt))) + (pt_lookup_from_level level pt vref target_pt)" + apply (induct level arbitrary: pt) + apply (simp add: pt_lookup_from_level_simps) + apply wp + apply (simp (no_asm) add: pt_lookup_from_level_simps unlessE_def) + apply clarsimp + apply (rule equiv_valid_guard_imp) + apply (wpsimp wp: get_pte_rev | assumption)+ + apply (frule vs_lookup_table_is_aligned; clarsimp) + apply (prop_tac "pt_walk level (level - 1) pt vref (ptes_of s) = + Some (level - 1, pptr_from_pte rv)") + apply (fastforce simp: vs_lookup_split_Some[OF order_less_imp_le[OF bit1.pred]] + pt_walk.simps obind_def) + apply (rule conjI) + apply (erule_tac level=level and bot_level="level-1" and pt_ptr=pt in pt_walk_is_subject; fastforce) + apply (rule_tac x=asid in exI) + apply (erule (2) vs_lookup_table_extend) + done + +lemma unmap_page_table_reads_respects: + "reads_respects aag l + (pas_refined aag and pspace_aligned and valid_vspace_objs and valid_asid_table + and K (asid \ 0 \ is_subject_asid aag asid \ vaddr \ user_region)) + 
(unmap_page_table asid vaddr pt)" + unfolding unmap_page_table_def fun_app_def cleanByVA_PoU_def + apply (rule gen_asm_ev) + apply (rule equiv_valid_guard_imp) + apply (wp dmo_mol_reads_respects store_pte_reads_respects get_pte_rev invalidate_tlb_by_asid_reads_respects + pt_lookup_from_level_reads_respects pt_lookup_from_level_is_subject + find_vspace_for_asid_wp find_vspace_for_asid_reads_respects hoare_vcg_all_liftE_R + | wpc | simp | rule hoare_strengthen_postE_R[OF pt_lookup_from_level_is_subject], fastforce)+ + apply clarsimp + apply (frule vspace_for_asid_is_subject) + apply (fastforce dest: vspace_for_asid_vs_lookup vs_lookup_table_vref_independent)+ + done + +lemma perform_page_table_invocation_reads_respects: + "reads_respects aag l (pas_refined aag and pspace_aligned and valid_objs and valid_vspace_objs + and valid_asid_table and valid_pti pti + and K (authorised_page_table_inv aag pti)) + (perform_page_table_invocation pti)" + unfolding perform_page_table_invocation_def perform_pt_inv_map_def perform_pt_inv_unmap_def +cleanByVA_PoU_def cleanCacheRange_PoU_def + apply (rule equiv_valid_guard_imp) + apply (wp dmo_mol_reads_respects store_pte_reads_respects set_cap_reads_respects mapM_x_ev'' + unmap_page_table_reads_respects get_cap_rev + | wpc | simp)+ + apply (case_tac pti; clarsimp simp: authorised_page_table_inv_def) + apply (clarsimp simp: valid_pti_def) + apply (frule cte_wp_valid_cap) + apply fastforce + apply (clarsimp simp: is_PageTableCap_def valid_cap_def wellformed_mapdata_def add_mask_fold) + done + +lemma unmap_page_reads_respects: + "reads_respects aag l + (pas_refined aag and pspace_aligned and valid_vspace_objs and valid_asid_table + and K (asid \ 0 \ is_subject_asid aag asid \ vptr \ user_region)) + (unmap_page pgsz asid vptr pptr)" + unfolding unmap_page_def catch_def fun_app_def cleanByVA_PoU_def + apply (simp add: unmap_page_def cong: vmpage_size.case_cong) + apply (simp add: unlessE_def gets_the_def) + apply (wp gets_ev' 
dmo_mol_reads_respects get_pte_rev throw_on_false_reads_respects + find_vspace_for_asid_reads_respects store_pte_reads_respects[simplified] + invalidate_tlb_by_asid_va_reads_respects + | wpc | simp add: is_aligned_mask[symmetric])+ + apply (clarsimp simp: pt_lookup_slot_def) + apply (frule (3) vspace_for_asid_is_subject) + apply safe + apply (frule vspace_for_asid_vs_lookup) + apply (frule (6) pt_walk_reads_equiv[where bot_level=0]) + apply (rule order_refl) + apply (erule vs_lookup_table_vref_independent[OF _ order_refl]) + apply (clarsimp simp: pt_lookup_slot_from_level_def obind_def split: option.splits) + apply (fastforce elim!: pt_lookup_slot_from_level_is_subject + dest: vspace_for_asid_vs_lookup vs_lookup_table_vref_independent)+ + done + +lemma perform_flush_reads_respects: + "reads_respects aag l \ (perform_flush type vstart vend pstart space asid)" + unfolding perform_flush_def do_flush_def cleanCacheRange_RAM_def invalidateCacheRange_RAM_def + cleanInvalidateCacheRange_RAM_def cleanCacheRange_PoU_def invalidateCacheRange_I_def isb_def dsb_def + by (cases type; wpsimp wp: dmo_mol_reads_respects when_ev simp: dmo_distr) + +lemma perform_page_invocation_reads_respects: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows + "reads_respects aag l (pas_refined aag and authorised_page_inv aag pgi and valid_page_inv pgi + and valid_vspace_objs and valid_asid_table + and pspace_aligned and is_subject aag \ cur_thread) + (perform_page_invocation pgi)" + unfolding perform_page_invocation_def fun_app_def when_def perform_pg_inv_map_def + perform_pg_inv_unmap_def perform_pg_inv_get_addr_def cleanByVA_PoU_def + apply (rule equiv_valid_guard_imp) + apply (wpsimp) + apply (wp dmo_mol_reads_respects mapM_x_ev'' store_pte_reads_respects set_cap_reads_respects + mapM_ev'' store_pte_reads_respects unmap_page_reads_respects dmo_mol_2_reads_respects + get_cap_rev set_mrs_reads_respects set_message_info_reads_respects + invalidate_tlb_by_asid_va_reads_respects 
get_pte_rev perform_flush_reads_respects + | simp + | wpc | wp (once) hoare_drop_imps[where Q'="\r s. r"])+ + apply (clarsimp simp: authorised_page_inv_def valid_page_inv_def) + apply (case_tac pgi; clarsimp) + apply (rule conjI) + prefer 2 + apply clarsimp + subgoal sorry (* FIXME AARCH64 IF *) + apply (auto simp: cte_wp_at_caps_of_state authorised_slots_def cap_links_asid_slot_def + label_owns_asid_slot_def valid_arch_cap_def wellformed_mapdata_def + dest!: clas_caps_of_state pas_refined_Control) + done + +lemma equiv_asids_arm_asid_table_update: + "\ equiv_asids R s t; kheap s pool_ptr = kheap t pool_ptr \ + \ equiv_asids R + (s\arch_state := arch_state s\arm_asid_table := (asid_table s) + (asid_high_bits_of asid \ pool_ptr)\\) + (t\arch_state := arch_state t\arm_asid_table := (asid_table t) + (asid_high_bits_of asid \ pool_ptr)\\)" + by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap opt_map_def) + +lemma arm_asid_table_update_reads_respects: + "reads_respects aag l (K (is_subject aag pool_ptr)) + (do r \ gets local.asid_table; + modify (\s. s\arch_state := + arch_state s\arm_asid_table := r(asid_high_bits_of asid \ pool_ptr)\\) + od)" + apply (simp add: equiv_valid_def2) + apply (rule_tac W="\\" + and Q="\rv s. 
is_subject aag pool_ptr \ rv = arm_asid_table (arch_state s)" + in equiv_valid_rv_bind) + apply (rule equiv_valid_rv_guard_imp[OF equiv_valid_rv_trivial]) + apply wpsimp+ + apply (rule modify_ev2) + apply clarsimp + apply (drule (1) is_subject_kheap_eq[rotated]) + apply (fastforce simp: reads_equiv_def2 affects_equiv_def2 states_equiv_for_def equiv_for_def + intro!: equiv_asids_arm_asid_table_update) + apply wpsimp + done + +lemma perform_asid_control_invocation_reads_respects: + notes K_bind_ev[wp del] + shows "reads_respects aag l (K (authorised_asid_control_inv aag aci)) + (perform_asid_control_invocation aci)" + unfolding perform_asid_control_invocation_def + apply (rule gen_asm_ev) + apply (rule equiv_valid_guard_imp) + (* we do some hacky rewriting here to separate out the bit that does interesting stuff from the rest *) + apply (subst (6) my_bind_rewrite_lemma) + apply (subst (1) bind_assoc[symmetric]) + apply (subst another_hacky_rewrite) + apply (subst another_hacky_rewrite) + apply (wpc) + apply (rule bind_ev) + apply (rule K_bind_ev) + apply (rule_tac P'=\ in bind_ev) + apply (rule K_bind_ev) + apply (rule bind_ev) + apply (rule bind_ev) + apply (rule return_ev) + apply (rule K_bind_ev) + apply simp + apply (rule arm_asid_table_update_reads_respects) + apply (wp cap_insert_reads_respects retype_region_reads_respects + set_cap_reads_respects delete_objects_reads_respects get_cap_rev + | simp add: authorised_asid_control_inv_def)+ + apply (auto dest!: is_aligned_no_overflow) + done + +lemma set_asid_pool_reads_respects: + "reads_respects aag l (K (is_subject aag ptr)) (set_asid_pool ptr pool)" + unfolding set_asid_pool_def + by (wpsimp wp: set_object_reads_respects get_asid_pool_rev) + +lemma set_asid_pool_globals_equiv: + "\globals_equiv s and valid_global_arch_objs\ + set_asid_pool ptr pool + \\_. 
globals_equiv s\" + unfolding set_asid_pool_def + apply (wpsimp wp: set_object_globals_equiv[THEN hoare_set_object_weaken_pre] simp: a_type_def) + apply (fastforce simp: obj_at_def dest: valid_global_arch_objs_pt_at) + done + +lemma perform_asid_pool_invocation_reads_respects_g: + "reads_respects_g aag l (pas_refined aag and invs and K (authorised_asid_pool_inv aag api)) + (perform_asid_pool_invocation api)" + unfolding perform_asid_pool_invocation_def store_asid_pool_entry_def + apply (rule equiv_valid_guard_imp) + apply (wpsimp wp: reads_respects_g[OF set_asid_pool_reads_respects] + reads_respects_g[OF get_asid_pool_rev] + set_asid_pool_globals_equiv set_cap_reads_respects + doesnt_touch_globalsI get_cap_auth_wp[where aag=aag] get_cap_rev + set_cap_reads_respects_g get_cap_reads_respects_g + | strengthen valid_arch_state_global_arch_objs + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: invs_arch_state invs_valid_global_objs invs_psp_aligned + invs_valid_global_vspace_mappings authorised_asid_pool_inv_def + cong: conj_cong) + done + +lemma equiv_asids_arm_asid_table_delete: + "equiv_asids R s t + \ equiv_asids R + (s\arch_state := arch_state s\arm_asid_table := \a. if a = asid_high_bits_of asid then None + else arm_asid_table (arch_state s) a\\) + (t\arch_state := arch_state t\arm_asid_table := \a. if a = asid_high_bits_of asid then None + else arm_asid_table (arch_state t) a\\)" + by (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap) + +lemma arm_asid_table_delete_ev2: + "equiv_valid_2 (reads_equiv aag) (affects_equiv aag l) (affects_equiv aag l) \\ + (\s. rv = arm_asid_table (arch_state s)) (\s. rv' = arm_asid_table (arch_state s)) + (modify (\s. s\arch_state := arch_state s\arm_asid_table := \a. if a = asid_high_bits_of base + then None + else rv a\\)) + (modify (\s. s\arch_state := arch_state s\arm_asid_table := \a. 
if a = asid_high_bits_of base + then None + else rv' a\\))" + apply (rule modify_ev2) + (* slow 15s *) + by (auto simp: reads_equiv_def2 affects_equiv_def2 + intro!: states_equiv_forI equiv_forI equiv_asids_arm_asid_table_delete + elim!: states_equiv_forE equiv_forE + elim: is_subject_kheap_eq[simplified reads_equiv_def2 states_equiv_for_def, rotated]) + + +lemma requiv_arm_asid_table_asid_high_bits_of_asid_eq': + "\ (\asid'. asid' \ 0 \ asid_high_bits_of asid' = asid_high_bits_of base + \ is_subject_asid aag asid'); reads_equiv aag s t \ + \ arm_asid_table (arch_state s) (asid_high_bits_of base) = + arm_asid_table (arch_state t) (asid_high_bits_of base)" + apply (insert asid_high_bits_0_eq_1) + apply (case_tac "base = 0") + apply (subgoal_tac "is_subject_asid aag 1") + apply simp + apply (rule requiv_arm_asid_table_asid_high_bits_of_asid_eq[where aag=aag]) + apply (erule_tac x=1 in allE) + apply simp+ + apply (rule requiv_arm_asid_table_asid_high_bits_of_asid_eq[where aag=aag]) + apply (erule_tac x=base in allE) + apply simp+ + done + +(* FIXME AARCH64 IF: not true *) +lemma get_vmid_vmid_states_equiv_for[wp]: + "get_vmid asid \states_equiv_for P Q R S st\" + unfolding get_vmid_def + apply wpsimp + sorry + +(* FIXME AARCH64 IF: not true *) +lemma set_vm_root_states_equiv_for[wp]: + "set_vm_root thread \states_equiv_for P Q R S st\" + unfolding set_vm_root_def catch_def fun_app_def set_global_user_vspace_def arm_context_switch_def + by (wpsimp wp: do_machine_op_mol_states_equiv_for + hoare_vcg_all_lift whenE_wp hoare_drop_imps + simp: setVSpaceRoot_def dmo_bind_valid if_apply_def2)+ + +lemma delete_asid_pool_reads_respects: + "reads_respects aag l (K (\asid'. asid' \ 0 \ asid_high_bits_of asid' = asid_high_bits_of base + \ is_subject_asid aag asid')) + (delete_asid_pool base ptr)" + unfolding delete_asid_pool_def + apply (rule equiv_valid_guard_imp) + apply (rule bind_ev) + apply (simp) + apply (subst equiv_valid_def2) + apply (rule_tac W="\\" + and Q="\rv s. 
rv = arm_asid_table (arch_state s) \ + (\asid'. asid' \ 0 \ asid_high_bits_of asid' = asid_high_bits_of base + \ is_subject_asid aag asid')" + in equiv_valid_rv_bind) + apply (rule equiv_valid_rv_guard_imp[OF equiv_valid_rv_trivial]) + apply (wp, simp) + apply (simp add: when_def) + apply (clarsimp | rule conjI)+ + apply (rule equiv_valid_2_guard_imp) + apply (rule equiv_valid_2_bind) + apply (rule equiv_valid_2_bind) + apply (rule equiv_valid_2_unobservable) + sorry +(* + apply (wp set_vm_root_states_equiv_for)+ + apply (rule arm_asid_table_delete_ev2) + apply (wp)+ + apply (rule equiv_valid_2_unobservable) + by (wp mapM_wp' return_ev2 + | rule conjI | drule (1) requiv_arm_asid_table_asid_high_bits_of_asid_eq' + | clarsimp | simp add: equiv_valid_2_def)+ +*) + +lemma set_asid_pool_state_equal_except_kheap: + "((), s') \ fst (set_asid_pool ptr pool s) + \ states_equal_except_kheap_asid s s' \ + (\p. p \ ptr \ kheap s p = kheap s' p) \ + asid_pools_of s' ptr = Some pool \ + (\asid. asid \ 0 + \ arm_asid_table (arch_state s) (asid_high_bits_of asid) = + arm_asid_table (arch_state s') (asid_high_bits_of asid) \ + (\pool_ptr. arm_asid_table (arch_state s) (asid_high_bits_of asid) = + Some pool_ptr + \ asid_pool_at pool_ptr s = asid_pool_at pool_ptr s' \ + (\asid_pool asid_pool'. pool_ptr \ ptr + \ asid_pools_of s pool_ptr = + Some asid_pool \ + asid_pools_of s' pool_ptr = + Some asid_pool' + \ asid_pool (asid_low_bits_of asid) = + asid_pool' (asid_low_bits_of asid))))" + by (clarsimp simp: set_asid_pool_def put_def bind_def set_object_def get_object_def gets_map_def + gets_def get_def return_def assert_def assert_opt_def fail_def + states_equal_except_kheap_asid_def equiv_for_def obj_at_def + split: if_split_asm option.split_asm) + +lemma set_asid_pool_delete_ev2: + "equiv_valid_2 (reads_equiv aag) (affects_equiv aag l) (affects_equiv aag l) \\ + (\s. 
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some a \ + asid_pools_of s a = Some pool \ asid \ 0 \ is_subject_asid aag asid) + (\s. arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some a \ + asid_pools_of s a = Some pool' \ asid \ 0 \ is_subject_asid aag asid) + (set_asid_pool a (pool(asid_low_bits_of asid := None))) + (set_asid_pool a (pool'(asid_low_bits_of asid := None)))" + apply (clarsimp simp: equiv_valid_2_def) + apply (frule_tac s'=b in set_asid_pool_state_equal_except_kheap) + apply (frule_tac s'=ba in set_asid_pool_state_equal_except_kheap) + apply (clarsimp simp: states_equal_except_kheap_asid_def) + apply (rule conjI) + apply (clarsimp simp: states_equiv_for_def reads_equiv_def equiv_for_def | rule conjI)+ + apply (case_tac "x=a") + apply (clarsimp simp: opt_map_def split: option.splits) + apply (fastforce) + apply (clarsimp simp: equiv_asids_def equiv_asid_def | rule conjI)+ + apply (case_tac "pool_ptr = a") + apply (clarsimp) + apply (erule_tac x="pasASIDAbs aag asid" in ballE) + apply (clarsimp) + apply (erule_tac x=asid in allE)+ + apply (clarsimp) + apply (drule aag_can_read_own_asids, simp) + apply (erule_tac x="pasASIDAbs aag asida" in ballE) + apply (clarsimp) + apply (erule_tac x=asida in allE)+ + apply (clarsimp) + apply (clarsimp) + apply (clarsimp) + apply (case_tac "pool_ptr=a") + apply (erule_tac x="pasASIDAbs aag asida" in ballE; clarsimp) + apply (clarsimp simp: opt_map_def split: option.splits) + apply (clarsimp simp: affects_equiv_def equiv_for_def states_equiv_for_def | rule conjI)+ + apply (case_tac "x=a") + apply (clarsimp simp: opt_map_def split: option.splits) + apply (fastforce) + apply (clarsimp simp: equiv_asids_def equiv_asid_def | rule conjI)+ + apply (case_tac "pool_ptr=a") + apply (clarsimp simp: opt_map_def split: option.splits) + apply (erule_tac x=asid in allE)+ + apply (clarsimp simp: asid_pool_at_kheap) + apply (erule_tac x=asida in allE)+ + apply (clarsimp) + apply (clarsimp) + apply (case_tac 
"pool_ptr=a") + apply (clarsimp simp: opt_map_def split: option.splits) + apply (clarsimp simp: opt_map_def split: option.splits) + done + +lemma delete_asid_reads_respects: + "reads_respects aag l (K (asid \ 0 \ is_subject_asid aag asid)) (delete_asid asid pt)" + unfolding delete_asid_def + supply fun_upd_apply[simp del] + apply (subst equiv_valid_def2) + apply (rule_tac W="\\" and Q="\rv s. rv = asid_table s (asid_high_bits_of asid) \ + is_subject_asid aag asid \ asid \ 0" in equiv_valid_rv_bind) + apply (rule equiv_valid_rv_guard_imp[OF equiv_valid_rv_trivial]) + apply (wp, simp) + apply (case_tac "rv = rv'") + apply (simp) + apply (case_tac "rv") + apply (simp) + apply (wp return_ev2, simp) + apply (simp) + apply (rule equiv_valid_2_guard_imp) + apply (rule_tac R'="\rv rv'. rv (asid_low_bits_of asid) = rv' (asid_low_bits_of asid)" + in equiv_valid_2_bind) + apply (simp add: when_def) + apply (clarsimp | rule conjI)+ + apply (rule_tac R'="\\" in equiv_valid_2_bind) + apply (rule_tac R'="\\" in equiv_valid_2_bind) + apply (rule_tac R'="\\" in equiv_valid_2_bind) + apply (rule_tac R'="\\" in equiv_valid_2_bind) + apply (subst equiv_valid_def2[symmetric]) + apply (rule reads_respects_unobservable_unit_return) + apply (wp set_vm_root_states_equiv_for)+ + apply (rule set_asid_pool_delete_ev2) + apply (wp)+ + apply (rule equiv_valid_2_unobservable) + apply (wpsimp wp: do_machine_op_mol_states_equiv_for)+ + sorry +(* + apply (clarsimp | rule return_ev2)+ + apply (rule equiv_valid_2_guard_imp) + apply (wp get_asid_pool_revrv) + apply (simp)+ + apply (wp)+ + apply (clarsimp simp: asid_pools_of_ko_at obj_at_def)+ + apply (clarsimp simp: equiv_valid_2_def reads_equiv_def + equiv_asids_def equiv_asid_def states_equiv_for_def) + apply (erule_tac x="pasASIDAbs aag asid" in ballE) + apply (clarsimp) + apply (drule aag_can_read_own_asids) + apply wpsimp+ + done +*) + +lemma globals_equiv_arm_asid_table_update[simp]: + "globals_equiv s (t\arch_state := arch_state 
t\arm_asid_table := x\\) = globals_equiv s t" + by (simp add: globals_equiv_def) + +lemma globals_equiv_arm_vmid_table_update[simp]: + "globals_equiv s (t\arch_state := arch_state t\arm_vmid_table := x\\) = globals_equiv s t" + by (simp add: globals_equiv_def) + +lemma globals_equiv_arm_next_vmid_update[simp]: + "globals_equiv s (t\arch_state := arch_state t\arm_next_vmid := x\\) = globals_equiv s t" + by (simp add: globals_equiv_def) + +lemma valid_global_arch_objs_arm_asid_table_update[simp]: + "valid_global_arch_objs (s\arch_state := arch_state s\arm_asid_table := x\\) = valid_global_arch_objs s" + by (simp add: valid_global_arch_objs_def) + +lemma set_global_user_vspace_globals_equiv[wp]: + "set_global_user_vspace \globals_equiv s\" + unfolding set_global_user_vspace_def setVSpaceRoot_def + by wpsimp + +lemma update_asid_pool_entry_globals_equiv[wp]: + "\globals_equiv s and valid_global_arch_objs\ + update_asid_pool_entry f asid + \\_. globals_equiv s\" + unfolding update_asid_pool_entry_def + by (wpsimp wp: set_asid_pool_globals_equiv) + +crunch invalidate_vmid_entry, invalidate_asid + for globals_equiv[wp]: "globals_equiv st" + (wp: crunch_wps dmo_mol_reads_respects simp: crunch_simps) + +lemma find_free_vmid_globals_equiv[wp]: + "\globals_equiv s and valid_global_arch_objs\ + find_free_vmid + \\_. globals_equiv s\" + unfolding find_free_vmid_def invalidateTranslationASID_def + by wpsimp + +crunch get_vmid + for globals_equiv[wp]: "globals_equiv st" + (wp: crunch_wps dmo_mol_reads_respects simp: crunch_simps) + +lemma arm_context_switch_globals_equiv[wp]: + "\globals_equiv s and valid_global_arch_objs\ + arm_context_switch vspace asid + \\_. globals_equiv s\" + unfolding arm_context_switch_def setVSpaceRoot_def + by (wpsimp wp: dmo_mol_reads_respects) + +lemma set_vm_root_globals_equiv[wp]: + "\globals_equiv s and valid_global_arch_objs\ + set_vm_root tcb + \\_. 
globals_equiv s\" + by (wpsimp wp: dmo_mol_globals_equiv hoare_vcg_all_lift hoare_drop_imps + simp: set_vm_root_def setVSpaceRoot_def) + +crunch invalidate_asid_entry + for globals_equiv[wp]: "globals_equiv st" + (wp: crunch_wps dmo_mol_reads_respects simp: crunch_simps) + +lemma invalidate_tlb_by_asid_globals_equiv[wp]: + "invalidate_tlb_by_asid asid \globals_equiv s\" + unfolding invalidate_tlb_by_asid_def invalidateTranslationASID_def + by wpsimp + +lemma invalidate_tlb_by_asid_va_globals_equiv[wp]: + "invalidate_tlb_by_asid_va asid vaddr \globals_equiv s\" + unfolding invalidate_tlb_by_asid_va_def invalidateTranslationSingle_def + by wpsimp + +lemma delete_asid_pool_globals_equiv[wp]: + "\globals_equiv s and valid_global_arch_objs\ + delete_asid_pool base ptr + \\_. globals_equiv s\" + unfolding delete_asid_pool_def + by (wpsimp wp: set_vm_root_globals_equiv mapM_wp[OF _ subset_refl] modify_wp) + +find_consts "5 \ ?'a" name: level + +lemma vs_lookup_slot_not_global: + "\ vs_lookup_slot level asid vref s = Some (level, pte); level \ max_pt_level; + pte_refs_of (level_type level) pte s = Some pt; vref \ user_region; invs s \ + \ pt \ global_refs s" + apply (prop_tac "vs_lookup_target level asid vref s = Some (level, pt)") + apply (clarsimp simp: vs_lookup_target_def obind_def split: if_splits) + apply (erule (2) vs_lookup_target_not_global) + done + +lemma unmap_page_table_globals_equiv: + "\invs and globals_equiv st and K (vaddr \ user_region)\ + unmap_page_table asid vaddr pt + \\rv. 
globals_equiv st\" + unfolding unmap_page_table_def cleanByVA_PoU_def + apply (wp store_pte_globals_equiv pt_lookup_from_level_wrp | wpc | simp)+ + apply clarsimp + apply (rule_tac x=asid in exI) + apply clarsimp + apply (case_tac "level = asid_pool_level") + apply (fastforce dest: vs_lookup_slot_no_asid simp: ptes_of_Some valid_arch_state_asid_table) + apply (drule vs_lookup_slot_table_base; clarsimp) + apply (drule reachable_page_table_not_global, clarsimp+) + done + +(* FIXME AARCH64 IF: delete if unused +lemma unmap_page_table_valid_arch_state: + "\invs and valid_arch_state and K (vaddr \ user_region)\ + unmap_page_table asid vaddr pt + \\_. valid_arch_state\" + unfolding unmap_page_table_def + apply (wpsimp wp: store_pte_valid_arch_state_unreachable pt_lookup_from_level_wrp) + apply (rule_tac x=asid in exI) + apply clarsimp + apply (case_tac "level = asid_pool_level") + apply (fastforce dest: vs_lookup_slot_no_asid simp: ptes_of_Some valid_arch_state_asid_table) + apply (drule vs_lookup_slot_table_base; clarsimp) + apply (drule reachable_page_table_not_global, clarsimp+) + done +*) + +lemma mapM_x_swp_store_pte_globals_equiv: + "\globals_equiv s and pspace_aligned and valid_arch_state and valid_global_vspace_mappings + and (\s. \x \ set slots. table_base pt_t x \ global_refs s)\ + mapM_x (swp (store_pte pt_t) pte) slots + \\_. globals_equiv s\" + apply (rule_tac Q'="\_. pspace_aligned and globals_equiv s and valid_arch_state + and valid_global_vspace_mappings + and (\s. \x \ set slots. table_base pt_t x \ global_refs s)" + in hoare_strengthen_post) + apply (wp mapM_x_wp' store_pte_valid_arch_state_unreachable + store_pte_valid_global_vspace_mappings store_pte_globals_equiv | simp)+ + apply (auto simp: global_refs_def) + done + +lemma mapM_x_swp_store_pte_valid_ko_at_arch[wp]: + "\pspace_aligned and valid_arch_state and valid_global_vspace_mappings + and (\s. \x \ set slots. table_base pt_t x \ global_refs s)\ + mapM_x (swp (store_pte pt_t) pte) slots + \\_. 
valid_arch_state\" + apply (rule_tac Q'="\_. pspace_aligned and valid_arch_state and valid_global_vspace_mappings + and (\s. \x \ set slots. table_base pt_t x \ global_refs s)" + in hoare_strengthen_post) + apply (wp mapM_x_wp' store_pte_valid_arch_state_unreachable + store_pte_valid_global_vspace_mappings store_pte_globals_equiv | simp)+ + done + +definition authorised_for_globals_page_table_inv :: + "page_table_invocation \ 's :: state_ext state \ bool" where + "authorised_for_globals_page_table_inv pti \ \s. + case pti of PageTableMap cap ptr pte p lvl \ table_base (level_type lvl) p \ arm_us_global_vspace (arch_state s) + | _ \ True" + +lemma perform_pt_inv_map_globals_equiv: + "\globals_equiv st and valid_arch_state and (\s. table_base (level_type lvl) p \ global_pt s)\ + perform_pt_inv_map cap sl pte p lvl + \\_. globals_equiv st\" + unfolding perform_pt_inv_map_def cleanByVA_PoU_def + by (wpsimp wp: store_pte_globals_equiv set_cap_globals_equiv'') + +lemma perform_pt_inv_unmap_globals_equiv: + "\invs and globals_equiv st and cte_wp_at ((=) (ArchObjectCap cap)) ct_slot\ + perform_pt_inv_unmap cap ct_slot + \\_. 
globals_equiv st\" + unfolding perform_pt_inv_unmap_def cleanCacheRange_PoU_def + apply (wpsimp wp: set_cap_globals_equiv'' mapM_x_swp_store_pte_globals_equiv) + apply (strengthen invs_imps invs_valid_global_vspace_mappings) + apply (clarsimp cong: conj_cong) + apply (wpsimp wp: unmap_page_table_globals_equiv unmap_page_table_invs) + apply wpsimp+ + apply auto + apply (drule cte_wp_valid_cap, fastforce) + apply (clarsimp simp: is_PageTableCap_def valid_cap_def valid_arch_cap_def wellformed_mapdata_def) + apply (frule cte_wp_valid_cap, fastforce) + apply (clarsimp simp: is_PageTableCap_def valid_cap_def valid_arch_cap_def wellformed_mapdata_def) + apply (prop_tac "table_base x42 x = acap_obj cap") + apply (prop_tac "is_aligned x41 (pt_bits x42)") + apply (fastforce dest: is_aligned_pt simp: valid_arch_cap_def) + apply (simp only: is_aligned_neg_mask_eq') + apply (clarsimp simp: add_mask_fold) + apply (drule subsetD[OF upto_enum_step_subset], clarsimp) + apply (drule_tac n="pt_bits x42" in neg_mask_mono_le) + apply (drule_tac n="pt_bits x42" in neg_mask_mono_le) + apply (fastforce dest: plus_mask_AND_NOT_mask_eq) + apply clarsimp + apply (frule invs_valid_global_refs) + apply (drule (2) valid_global_refsD[OF invs_valid_global_refs]) + apply (clarsimp simp: cap_range_def) + done + +lemma perform_page_table_invocation_globals_equiv: + "\invs and globals_equiv st and valid_pti pti and authorised_for_globals_page_table_inv pti\ + perform_page_table_invocation pti + \\_. globals_equiv st\" + unfolding perform_page_table_invocation_def + apply (wpsimp wp: store_pte_globals_equiv set_cap_globals_equiv'' + perform_pt_inv_map_globals_equiv + perform_pt_inv_unmap_globals_equiv) + apply (case_tac pti; clarsimp simp: authorised_for_globals_page_table_inv_def valid_pti_def) + done + +lemma mapM_swp_store_pte_globals_equiv: + "\globals_equiv s and (\s. \x \ set slots. table_base pt_t x \ global_refs s)\ + mapM (swp (store_pte pt_t) pte) slots + \\_. 
globals_equiv s\" + apply (rule_tac Q'="\_. globals_equiv s and (\s. \x \ set slots. table_base pt_t x \ global_refs s)" + in hoare_strengthen_post) + apply (wp mapM_wp' store_pte_valid_arch_state_unreachable + store_pte_valid_global_vspace_mappings store_pte_globals_equiv | simp)+ + apply (auto simp: global_refs_def) + done + +(* FIXME AARCH64 IF: delete if unused +lemma mapM_swp_store_pte_valid_ko_at_arch[wp]: + "\globals_equiv s (\s. \x \ set slots. table_base x \ global_refs s)\ + mapM (swp store_pte pte) slots + \\_. valid_arch_state\" + apply (rule_tac Q'="\_. pspace_aligned and globals_equiv s and valid_arch_state + and valid_global_vspace_mappings + and (\s. \x \ set slots. table_base x \ global_refs s)" + in hoare_strengthen_post) + apply (wp mapM_wp' store_pte_valid_arch_state_unreachable + store_pte_valid_global_vspace_mappings store_pte_globals_equiv | simp)+ + apply (clarsimp simp: valid_arch_state_def) + apply (fastforce dest: global_pt_in_global_refs[OF invs_valid_global_arch_objs]) + apply auto + done +*) + +lemma unmap_page_globals_equiv: + "\globals_equiv st and invs and K (vptr \ user_region)\ + unmap_page pgsz asid vptr pptr + \\_. globals_equiv st\" + unfolding unmap_page_def cleanByVA_PoU_def including no_pre + apply (induct pgsz) + apply (wpsimp wp: store_pte_globals_equiv | simp)+ + apply (rule hoare_weaken_preE[OF find_vspace_for_asid_wp]) + apply clarsimp + apply (frule (1) pt_lookup_slot_vs_lookup_slotI0) + apply (drule vs_lookup_slot_table_base; clarsimp?) + apply (drule reachable_page_table_not_global; clarsimp?) + apply fastforce + apply (rule hoare_pre) + apply (wpsimp wp: store_pte_globals_equiv mapM_swp_store_pte_globals_equiv hoare_drop_imps)+ + apply (frule (1) pt_lookup_slot_vs_lookup_slotI0) + apply (drule vs_lookup_slot_level) + apply (case_tac "x = asid_pool_level") + apply (fastforce dest: vs_lookup_slot_no_asid simp: ptes_of_Some valid_arch_state_asid_table) + apply (drule vs_lookup_slot_table_base; clarsimp?) 
+ apply (drule reachable_page_table_not_global; clarsimp?) + apply fastforce + apply (wpsimp wp: store_pte_globals_equiv)+ + apply (rule hoare_weaken_preE[OF find_vspace_for_asid_wp]) + apply clarsimp + apply (frule (1) pt_lookup_slot_vs_lookup_slotI0) + apply (drule vs_lookup_slot_level) + apply (case_tac "x = asid_pool_level") + apply (fastforce dest: vs_lookup_slot_no_asid simp: ptes_of_Some valid_arch_state_asid_table) + apply (drule vs_lookup_slot_table_base; clarsimp?) + apply (drule reachable_page_table_not_global; clarsimp?) + apply fastforce + done + + +definition authorised_for_globals_page_inv :: + "page_invocation \ 'z :: state_ext state \ bool" where + "authorised_for_globals_page_inv pgi \ \s. + case pgi of PageMap cap ptr m \ (\slot. cte_wp_at (parent_for_refs m) slot s) | _ \ True" + + +lemma length_msg_lt_msg_max: + "length msg_registers < msg_max_length" + by (simp add: msg_registers_def msgRegisters_def upto_enum_def + fromEnum_def enum_register msg_max_length_def) + +lemma set_mrs_globals_equiv: + "\globals_equiv s and valid_arch_state and (\sa. thread \ idle_thread sa)\ + set_mrs thread buf msgs + \\_. globals_equiv s\" + unfolding set_mrs_def + apply (wp | wpc)+ + apply (simp add: zipWithM_x_mapM_x) + apply (rule conjI) + apply (rule impI) + apply (rule_tac Q'="\_. globals_equiv s" in hoare_strengthen_post) + apply (wp mapM_x_wp') + apply (simp add: split_def) + apply (wp store_word_offs_globals_equiv) + apply (simp) + apply (clarsimp) + apply (insert length_msg_lt_msg_max) + apply (simp) + apply (wp set_object_globals_equiv hoare_weak_lift_imp) + apply (wp hoare_vcg_all_lift set_object_globals_equiv hoare_weak_lift_imp)+ + apply (fastforce simp: valid_arch_state_def obj_at_def get_tcb_def + dest: valid_global_arch_objs_pt_at) + done + +lemma perform_pg_inv_get_addr_globals_equiv: + "\globals_equiv st and valid_arch_state and (\s. cur_thread s \ idle_thread s)\ + perform_pg_inv_get_addr ptr + \\_. 
globals_equiv st\" + unfolding perform_pg_inv_get_addr_def + by (wpsimp wp: set_message_info_globals_equiv set_mrs_globals_equiv) + +(* FIXME AARCH64 IF: delete if unused +lemma unmap_page_valid_arch_state: + "\invs and K (vptr \ user_region)\ + unmap_page pgsz asid vptr pptr + \\_. valid_arch_state\" + unfolding unmap_page_def + apply (wpsimp wp: store_pte_valid_arch_state_unreachable) + apply (frule invs_arch_state) + apply (frule invs_valid_global_vspace_mappings) + apply clarsimp + apply (frule (1) pt_lookup_slot_vs_lookup_slotI0) + apply (drule vs_lookup_slot_level) + apply (case_tac "x = asid_pool_level") + apply (fastforce dest: vs_lookup_slot_no_asid simp: ptes_of_Some valid_arch_state_asid_table) + apply (drule vs_lookup_slot_table_base; clarsimp?) + apply (drule reachable_page_table_not_global; clarsimp?) + done +*) + +crunch unmap_page + for valid_global_arch_objs[wp]: "valid_global_arch_objs" + (wp: crunch_wps simp: crunch_simps) + +lemma perform_pg_inv_unmap_globals_equiv: + "\invs and globals_equiv st and cte_wp_at ((=) (ArchObjectCap cap)) ct_slot\ + perform_pg_inv_unmap cap ct_slot + \\_. globals_equiv st\" + unfolding perform_pg_inv_unmap_def + apply (rule hoare_weaken_pre) + apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv + set_cap_globals_equiv''' unmap_page_globals_equiv store_pte_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv + perform_pg_inv_get_addr_globals_equiv + | wpc | simp add: do_machine_op_bind)+ + apply (clarsimp simp: acap_map_data_def) + apply (intro conjI; clarsimp) + apply (clarsimp split: arch_cap.splits) + apply (drule cte_wp_valid_cap, fastforce) + apply (clarsimp simp: valid_cap_def valid_arch_cap_def wellformed_mapdata_def) + done + +lemma perform_pg_inv_map_globals_equiv: + "\invs and globals_equiv st and (\s. table_base (level_type lvl) slot \ global_pt s)\ + perform_pg_inv_map cap ct_slot pte slot lvl + \\_. 
globals_equiv st\" + unfolding perform_pg_inv_map_def cleanByVA_PoU_def + apply (wp mapM_swp_store_pte_globals_equiv hoare_vcg_all_lift mapM_x_swp_store_pte_globals_equiv + set_cap_globals_equiv''' unmap_page_globals_equiv store_pte_globals_equiv + store_pte_globals_equiv hoare_weak_lift_imp set_message_info_globals_equiv + perform_pg_inv_get_addr_globals_equiv + | wpc | simp add: do_machine_op_bind | rule conjI; rule impI, wp hoare_drop_imps)+ + apply fastforce + done + +lemma perform_flush_globals_equiv[wp]: + "perform_flush type vstart vend pstart space asid \globals_equiv st\" + unfolding perform_flush_def do_flush_def cleanCacheRange_RAM_def invalidateCacheRange_RAM_def + cleanInvalidateCacheRange_RAM_def cleanCacheRange_PoU_def invalidateCacheRange_I_def isb_def dsb_def + by (cases type; wpsimp wp: dmo_mol_reads_respects when_ev simp: dmo_distr) + +lemma perform_page_invocation_globals_equiv: + "\invs and authorised_for_globals_page_inv pgi and valid_page_inv pgi + and globals_equiv st and ct_active\ + perform_page_invocation pgi + \\_. globals_equiv st\" + unfolding perform_page_invocation_def + apply (wpsimp wp: perform_pg_inv_get_addr_globals_equiv + perform_pg_inv_unmap_globals_equiv perform_pg_inv_map_globals_equiv) + apply (intro conjI) + apply (clarsimp simp: valid_page_inv_def same_ref_def) + apply (drule vs_lookup_slot_table_base; clarsimp) + apply (drule reachable_page_table_not_global, clarsimp+) + apply (clarsimp simp: valid_page_inv_def) + apply clarsimp + apply (fastforce dest: invs_valid_idle + simp: valid_idle_def pred_tcb_at_def obj_at_def ct_in_state_def) + done + +lemma retype_region_ASIDPoolObj_globals_equiv: + "\globals_equiv s and (\sa. ptr \ global_pt s) and (\sa. ptr \ idle_thread sa)\ + retype_region ptr 1 0 (ArchObject ASIDPoolObj) dev + \\_. 
globals_equiv s\" + unfolding retype_region_def + apply (wpsimp wp: modify_wp dxo_wp_weak + simp: trans_state_update[symmetric] simp_del: trans_state_update) + apply (fastforce simp: globals_equiv_def idle_equiv_def tcb_at_def2) + done + +lemma perform_asid_control_invocation_globals_equiv: + notes delete_objects_invs[wp del] + notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + shows "\globals_equiv s and invs and ct_active and valid_aci aci\ + perform_asid_control_invocation aci + \\_. globals_equiv s\" + unfolding perform_asid_control_invocation_def + apply (rule hoare_pre) + apply wpc + apply (rename_tac word1 cslot_ptr1 cslot_ptr2 word2) + apply (wp modify_wp cap_insert_globals_equiv'' + retype_region_ASIDPoolObj_globals_equiv[simplified] + retype_region_invs_extras(5)[where sz=pageBits] + retype_region_invs_extras(6)[where sz=pageBits] + set_cap_globals_equiv + max_index_upd_invs_simple set_cap_no_overlap + set_cap_caps_no_overlap max_index_upd_caps_overlap_reserved + region_in_kernel_window_preserved + hoare_vcg_all_lift get_cap_wp hoare_weak_lift_imp + set_cap_idx_up_aligned_area[where dev = False,simplified] + | simp)+ + (* factor out the implication -- we know what the relevant components of the + cap referred to in the cte_wp_at are anyway from valid_aci, so just use + those directly to simplify the reasoning later on *) + apply (rule_tac Q'="\a b. globals_equiv s b \ invs b \ + word1 \ arm_us_global_vspace (arch_state b) \ word1 \ idle_thread b \ + (\idx. 
cte_wp_at ((=) (UntypedCap False word1 pageBits idx)) cslot_ptr2 b) \ + descendants_of cslot_ptr2 (cdt b) = {} \ + pspace_no_overlap_range_cover word1 pageBits b" + in hoare_strengthen_post) + prefer 2 + apply (clarsimp simp: globals_equiv_def invs_valid_global_objs) + apply (drule cte_wp_at_eqD2, assumption) + apply clarsimp + apply (clarsimp simp: empty_descendants_range_in) + apply (rule conjI, fastforce simp: cte_wp_at_def) + apply (clarsimp simp: obj_bits_api_def default_arch_object_def) + apply (frule untyped_cap_aligned, simp add: invs_valid_objs) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (strengthen refl caps_region_kernel_window_imp[mk_strg I E]) + apply (simp add: invs_valid_objs invs_cap_refs_in_kernel_window + atLeastatMost_subset_iff word_and_le2 + cong: conj_cong) + apply (rule conjI, rule descendants_range_caps_no_overlapI) + apply assumption + apply (simp add: cte_wp_at_caps_of_state) + apply (simp add: empty_descendants_range_in) + apply (clarsimp simp: range_cover_def) + apply (subst is_aligned_neg_mask_eq[THEN sym], assumption) + apply (simp add: word_bw_assocs pageBits_def mask_zero) + apply (wp add: delete_objects_invs_ex delete_objects_pspace_no_overlap[where dev=False] + delete_objects_globals_equiv hoare_vcg_ex_lift + del: Untyped_AI.delete_objects_pspace_no_overlap | simp)+ + apply (clarsimp simp: conj_comms + invs_psp_aligned invs_valid_objs valid_aci_def) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (frule_tac cap="UntypedCap False a b c" for a b c in caps_of_state_valid, assumption) + apply (clarsimp simp: valid_cap_def cap_aligned_def untyped_min_bits_def) + apply (frule_tac slot="(aa,ba)" + in untyped_caps_do_not_overlap_global_refs[rotated, OF invs_valid_global_refs]) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply ((rule conjI |rule refl | simp)+)[1] + apply (rule conjI) + apply (clarsimp simp: global_refs_def ptr_range_memI) + apply (rule conjI) + apply clarify + apply (drule_tac p="global_pt sa" 
in ptr_range_memI) + apply fastforce + apply (rule conjI, fastforce simp: global_refs_def) + apply (rule conjI) + apply clarify + apply fastforce + apply (rule conjI) + apply (drule untyped_slots_not_in_untyped_range) + apply (blast intro!: empty_descendants_range_in) + apply (simp add: cte_wp_at_caps_of_state) + apply simp + apply (rule refl) + apply (rule subset_refl) + apply (simp) + apply (rule conjI) + apply fastforce + apply (auto intro: empty_descendants_range_in simp: descendants_range_def2 cap_range_def) + done + +lemma store_asid_pool_entry_globals_equiv: + "\globals_equiv st and valid_arch_state\ + store_asid_pool_entry pool_ptr asid ptr + \\_. globals_equiv st\" + unfolding store_asid_pool_entry_def + by (wp modify_wp set_asid_pool_globals_equiv set_cap_globals_equiv'' get_cap_wp | wpc | fastforce)+ + +lemma perform_asid_pool_invocation_globals_equiv: + "\globals_equiv s and invs and valid_apinv api\ + perform_asid_pool_invocation api + \\_. globals_equiv s\" + unfolding perform_asid_pool_invocation_def + apply (rule hoare_weaken_pre) + apply (wp modify_wp set_asid_pool_globals_equiv set_cap_globals_equiv'' + store_asid_pool_entry_globals_equiv get_cap_wp + | wpc | simp)+ + apply (clarsimp simp: valid_apinv_def cong: conj_cong) + done + + +definition authorised_for_globals_arch_inv :: + "arch_invocation \ ('z::state_ext) state \ bool" where + "authorised_for_globals_arch_inv ai \ + case ai of InvokePageTable oper \ authorised_for_globals_page_table_inv oper + | InvokePage oper \ authorised_for_globals_page_inv oper + | _ \ \" + +(* +text \VSpace capabilities confer the authority to flush.\ +definition perform_vspace_invocation :: "vspace_invocation \ (unit,'z::state_ext) s_monad" where + "perform_vspace_invocation iv \ case iv of + VSpaceNothing \ return () + | VSpaceFlush type start end pstart space asid \ perform_flush type start end pstart space asid" + +definition perform_sgi_invocation :: "sgi_signal_invocation \ (unit,'z::state_ext) s_monad" where 
+ "perform_sgi_invocation iv \ case iv of + SGISignalGenerate irq target \ do_machine_op $ sendSGI (ucast irq) (ucast target)" +*) + +crunch perform_vspace_invocation + for globals_equiv[wp]: "globals_equiv st" + (simp: sendSGI_def wp: dmo_mol_globals_equiv) + +lemma perform_sgi_invocation_globals_equiv[wp]: + "perform_sgi_invocation iv + \globals_equiv s\" + unfolding perform_sgi_invocation_def sendSGI_def + by wpsimp + +lemma arch_perform_invocation_reads_respects_g: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects_g aag l (ct_active and authorised_arch_inv aag ai and valid_arch_inv ai + and authorised_for_globals_arch_inv ai and invs + and pas_refined aag and is_subject aag \ cur_thread) + (arch_perform_invocation ai)" + unfolding arch_perform_invocation_def fun_app_def + apply (case_tac ai; rule equiv_valid_guard_imp) + apply (wpsimp wp: doesnt_touch_globalsI + reads_respects_g[OF perform_page_table_invocation_reads_respects] + reads_respects_g[OF perform_page_invocation_reads_respects] + reads_respects_g[OF perform_asid_control_invocation_reads_respects] + perform_asid_pool_invocation_reads_respects_g + perform_page_table_invocation_globals_equiv + perform_page_invocation_globals_equiv + perform_asid_control_invocation_globals_equiv + perform_asid_pool_invocation_globals_equiv + simp: authorised_arch_inv_def valid_arch_inv_def authorised_for_globals_arch_inv_def + invs_psp_aligned invs_valid_objs invs_vspace_objs invs_valid_asid_table | simp)+ + sorry + +lemma set_vcpu_globals_equiv[wp]: + "\globals_equiv s and valid_arch_state\ + set_vcpu ptr vcpu + \\_. globals_equiv s\" + unfolding set_vcpu_def + apply (wpsimp wp: set_object_globals_equiv[THEN hoare_set_object_weaken_pre] get_object_wp + simp: partial_inv_def)+ + apply (fastforce simp: obj_at_def valid_arch_state_def dest: valid_global_arch_objs_pt_at) + done + +lemma vcpu_update_globals_equiv[wp]: + "\globals_equiv s and valid_arch_state\ + vcpu_update vr f + \\_. 
globals_equiv s\" + unfolding vcpu_update_def + by wpsimp + +lemma thread_set_globals_equiv: + "(\tcb. arch_tcb_context_get (tcb_arch (f tcb)) = arch_tcb_context_get (tcb_arch tcb)) + \ \globals_equiv s and valid_arch_state\ thread_set f tptr \\_. globals_equiv s\" + unfolding thread_set_def + apply (wp set_object_globals_equiv) + apply simp + apply (intro impI conjI allI) + apply (fastforce simp: valid_arch_state_def obj_at_def get_tcb_def dest: valid_global_arch_objs_pt_at)+ + done + +lemma arch_thread_set_vcpu_globals_equiv[wp]: + "\globals_equiv s and valid_arch_state\ + arch_thread_set (tcb_vcpu_update f) t + \\_. globals_equiv s\" + unfolding arch_thread_set_is_thread_set + by (wpsimp wp: thread_set_globals_equiv simp: arch_tcb_context_get_def) + +crunch vcpu_save_reg + for globals_equiv[wp]: "globals_equiv st" + (simp: readVCPUHardwareReg_def) + +lemma vcpu_save_globals_equiv[wp]: + "\globals_equiv s and valid_arch_state\ + vcpu_save a + \\_. globals_equiv s\" + unfolding vcpu_save_def vcpu_save_reg_range_def + apply (wpsimp wp: ) + oops + +lemma arch_perform_invocation_globals_equiv: + "\globals_equiv s and invs and ct_active and valid_arch_inv ai + and authorised_for_globals_arch_inv ai\ + arch_perform_invocation ai + \\_. 
globals_equiv s\" + unfolding arch_perform_invocation_def + apply (wpsimp wp: perform_page_table_invocation_globals_equiv + perform_page_invocation_globals_equiv + perform_asid_control_invocation_globals_equiv + perform_asid_pool_invocation_globals_equiv)+ + apply (auto simp: authorised_for_globals_arch_inv_def invs_def valid_state_def valid_arch_inv_def) + sorry + +crunch arch_post_cap_deletion + for valid_global_objs[wp]: "valid_global_objs" + +lemma get_thread_state_globals_equiv[wp]: + "get_thread_state ref \globals_equiv s\" + by wp + +(* generalises auth_ipc_buffers_mem_Write *) +lemma auth_ipc_buffers_mem_Write': + "\ x \ auth_ipc_buffers s thread; pas_refined aag s; valid_objs s \ + \ (pasObjectAbs aag thread, Write, pasObjectAbs aag x) \ pasPolicy aag" + apply (clarsimp simp add: auth_ipc_buffers_member_def) + apply (drule (1) cap_auth_caps_of_state) + apply simp + apply (clarsimp simp: aag_cap_auth_def cap_auth_conferred_def arch_cap_auth_conferred_def + vspace_cap_rights_to_auth_def vm_read_write_def + split: if_split_asm) + apply (auto dest: ipcframe_subset_page) + done + +end + +hide_fact as_user_globals_equiv + +context begin interpretation Arch . 
+ +requalify_consts + authorised_for_globals_arch_inv + +requalify_facts + arch_post_cap_deletion_valid_global_objs + get_thread_state_globals_equiv + auth_ipc_buffers_mem_Write' + thread_set_globals_equiv + arch_post_modify_registers_cur_domain + arch_post_modify_registers_cur_thread + length_msg_lt_msg_max + set_mrs_globals_equiv + arch_perform_invocation_globals_equiv + as_user_globals_equiv + prepare_thread_delete_st_tcb_at_halted + make_arch_fault_msg_inv + check_valid_ipc_buffer_inv + arch_tcb_update_aux2 + arch_perform_invocation_reads_respects_g + +declare + arch_post_cap_deletion_valid_global_objs[wp] + get_thread_state_globals_equiv[wp] + arch_post_modify_registers_cur_domain[wp] + arch_post_modify_registers_cur_thread[wp] + prepare_thread_delete_st_tcb_at_halted[wp] + +end + +declare as_user_globals_equiv[wp] + +(* +axiomatization dmo_reads_respects where + dmo_read_stval_reads_respects: "reads_respects aag l \ (do_machine_op read_stval)" +*) + +end diff --git a/proof/infoflow/AARCH64/ArchCNode_IF.thy b/proof/infoflow/AARCH64/ArchCNode_IF.thy new file mode 100644 index 0000000000..47ebd6b097 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchCNode_IF.thy @@ -0,0 +1,143 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchCNode_IF +imports CNode_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems CNode_IF_assms + +lemma set_object_globals_equiv: + "\globals_equiv s and (\s. ptr \ arm_us_global_vspace (arch_state s)) and + (\t. ptr = idle_thread t + \ (\tcb. kheap t (idle_thread t) = Some (TCB tcb) + \ (\tcb'. obj = (TCB tcb') + \ arch_tcb_context_get (tcb_arch tcb) = arch_tcb_context_get (tcb_arch tcb'))) \ + (\tcb'. obj = (TCB tcb') \ tcb_at (idle_thread t) t))\ + set_object ptr obj + \\_. 
globals_equiv s\" + apply (wpsimp wp: set_object_wp) + apply (case_tac "ptr = idle_thread sa") + apply (clarsimp simp: globals_equiv_def idle_equiv_def tcb_at_def2) + apply (intro impI conjI allI notI iffI | clarsimp)+ + apply (clarsimp simp: globals_equiv_def idle_equiv_def tcb_at_def2) + done + +lemma set_object_globals_equiv'': + "\globals_equiv s and (\ s. ptr \ arm_us_global_vspace (arch_state s)) and (\t. ptr \ idle_thread t)\ + set_object ptr obj + \\_. globals_equiv s\" + by (wpsimp wp: set_object_globals_equiv) + +lemma set_cap_globals_equiv': + "\globals_equiv s and (\ s. fst p \ arm_us_global_vspace (arch_state s))\ + set_cap cap p + \\_. globals_equiv s\" + unfolding set_cap_def + apply (simp only: split_def) + apply (wp set_object_globals_equiv hoare_vcg_all_lift get_object_wp | wpc | simp)+ + apply (fastforce simp: obj_at_def is_tcb_def) + done + +lemma set_cap_globals_equiv[CNode_IF_assms]: + "\globals_equiv s and valid_global_objs and valid_arch_state\ + set_cap cap p + \\_. globals_equiv s\" + unfolding set_cap_def + apply (simp only: split_def) + apply (wp set_object_globals_equiv hoare_vcg_all_lift get_object_wp | wpc | simp)+ + apply (fastforce simp: is_tcb_def obj_at_def valid_arch_state_def + dest: valid_global_arch_objs_pt_at) + done + +definition irq_at :: "nat \ (irq \ bool) \ irq option" where + "irq_at pos masks \ let i = irq_oracle pos in (if masks i then None else Some i)" + +lemma dmo_getActiveIRQ_globals_equiv[CNode_IF_assms]: + "\globals_equiv st\ do_machine_op (getActiveIRQ in_kernel) \\_. 
globals_equiv st\" + unfolding globals_equiv_def arch_globals_equiv_def idle_equiv_def + apply (rule hoare_weaken_pre) + apply wps + apply wpsimp + apply clarsimp + done + +lemma arch_globals_equiv_irq_state_update[CNode_IF_assms, simp]: + "arch_globals_equiv ct it kh kh' as as' ms (irq_state_update f ms') = + arch_globals_equiv ct it kh kh' as as' ms ms'" + "arch_globals_equiv ct it kh kh' as as' (irq_state_update f ms) ms' = + arch_globals_equiv ct it kh kh' as as' ms ms'" + by auto + +end + + +requalify_consts AARCH64.irq_at + +global_interpretation CNode_IF_1?: CNode_IF_1 _ irq_at +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact CNode_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma is_irq_at_triv[CNode_IF_assms]: + assumes a: "\P. \(\s. P (irq_masks (machine_state s))) and Q\ + f + \\rv s. P (irq_masks (machine_state s))\" + shows "\(\s. P (is_irq_at s)) and Q\ f \\rv s. P (is_irq_at s)\" + apply (clarsimp simp: valid_def is_irq_at_def irq_at_def Let_def) + apply (erule use_valid[OF _ a]) + apply simp + done + +lemma is_irq_at_not_masked[CNode_IF_assms]: + "is_irq_at s irq pos \ \ irq_masks (machine_state s) irq" + by (clarsimp simp: is_irq_at_def irq_at_def split: option.splits simp: Let_def split: if_splits) + +end + + +global_interpretation CNode_IF_2?: CNode_IF_2 irq_at +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact CNode_IF_assms)?) 
+qed + + +context Arch begin global_naming AARCH64 + +lemma dmo_getActiveIRQ_reads_respects[CNode_IF_assms]: + notes gets_ev[wp del] + shows "reads_respects aag l (invs and only_timer_irq_inv irq st) + (do_machine_op (getActiveIRQ in_kernel))" + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects') + apply (simp add: getActiveIRQ_def) + apply (wp irq_state_increment_reads_respects_memory irq_state_increment_reads_respects_device + gets_ev[where f="irq_oracle \ irq_state"] equiv_valid_inv_conj_lift + gets_irq_masks_equiv_valid modify_wp + | simp add: no_irq_def)+ + apply (rule only_timer_irq_inv_determines_irq_masks, blast+) + done + +end + + +global_interpretation CNode_IF_3?: CNode_IF_3 irq_at +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact CNode_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchDecode_IF.thy b/proof/infoflow/AARCH64/ArchDecode_IF.thy new file mode 100644 index 0000000000..ea132529ee --- /dev/null +++ b/proof/infoflow/AARCH64/ArchDecode_IF.thy @@ -0,0 +1,447 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchDecode_IF +imports Decode_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Decode_IF_assms + +lemma data_to_obj_type_rev[Decode_IF_assms]: + "reads_equiv_valid_inv A aag \ (data_to_obj_type type)" + unfolding data_to_obj_type_def fun_app_def arch_data_to_obj_type_def + apply (wp | wpc)+ + apply simp + done + +lemma check_valid_ipc_buffer_rev[Decode_IF_assms]: + "reads_equiv_valid_inv A aag \ (check_valid_ipc_buffer vptr cap)" + unfolding check_valid_ipc_buffer_def fun_app_def + apply (rule equiv_valid_guard_imp) + apply (wpc | wp)+ + apply simp + done + +lemma arch_check_irq_rev[Decode_IF_assms, wp]: + "reads_equiv_valid_inv A aag \ (arch_check_irq irq)" + unfolding arch_check_irq_def + apply (rule equiv_valid_guard_imp) + apply wpsimp+ + done + +lemma 
vspace_cap_rights_to_auth_mono[Decode_IF_assms]: + "R \ S \ vspace_cap_rights_to_auth R \ vspace_cap_rights_to_auth S" + by (auto simp: vspace_cap_rights_to_auth_def) + +lemma arch_decode_irq_control_invocation_rev[Decode_IF_assms]: + "reads_equiv_valid_inv A aag + (pas_refined aag and + K (is_subject aag (fst slot) \ (\cap\set caps. pas_cap_cur_auth aag cap) \ + (args \ [] \ (pasSubject aag, Control, pasIRQAbs aag (ucast (args ! 0))) \ pasPolicy aag))) + (arch_decode_irq_control_invocation label args slot caps)" + unfolding arch_decode_irq_control_invocation_def arch_check_irq_def + apply (wp ensure_empty_rev lookup_slot_for_cnode_op_rev + is_irq_active_rev whenE_inv range_check_ev + | wp (once) hoare_drop_imps + | simp add: Let_def unlessE_def split del: if_split)+ + apply safe + apply simp+ + apply (blast intro: aag_Control_into_owns_irq) + apply (drule_tac x="caps ! 0" in bspec) + apply (fastforce intro: bang_0_in_set) + apply (drule (1) is_cnode_into_is_subject; blast dest: prop_of_obj_ref_of_cnode_cap) + apply (fastforce dest: is_cnode_into_is_subject intro: bang_0_in_set) + apply (drule_tac x="caps ! 0" in bspec) + apply (fastforce intro: bang_0_in_set) + apply (drule (1) is_cnode_into_is_subject; blast dest: prop_of_obj_ref_of_cnode_cap) + apply (fastforce dest: is_cnode_into_is_subject intro: bang_0_in_set) + done + +end + + +global_interpretation Decode_IF_1?: Decode_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Decode_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma requiv_arm_asid_table_asid_high_bits_of_asid_eq'': + "\ \asid. 
is_subject_asid aag asid; reads_equiv aag s t; pas_refined aag x \ + \ arm_asid_table (arch_state s) (asid_high_bits_of base) = + arm_asid_table (arch_state t) (asid_high_bits_of base)" + apply (subgoal_tac "asid_high_bits_of 0 = asid_high_bits_of 1") + apply (case_tac "base = 0") + apply (subgoal_tac "is_subject_asid aag 1") + apply ((auto intro: requiv_arm_asid_table_asid_high_bits_of_asid_eq) | + (auto simp: asid_high_bits_of_def asid_low_bits_def))+ + done + +lemma pas_cap_cur_auth_ASIDControlCap: + "\ pas_cap_cur_auth aag (ArchObjectCap ASIDControlCap); reads_equiv aag s t; pas_refined aag x \ + \ arm_asid_table (arch_state s) = arm_asid_table (arch_state t)" + apply (rule ext) + apply (subst asid_high_bits_of_shift[symmetric]) + apply (subst (3) asid_high_bits_of_shift[symmetric]) + apply (rule requiv_arm_asid_table_asid_high_bits_of_asid_eq'') + apply (clarsimp simp: aag_cap_auth_def cap_links_asid_slot_def label_owns_asid_slot_def) + apply (rule pas_refined_Control_into_is_subject_asid, blast+) + done + +lemma decode_asid_pool_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and K (cap = ASIDPoolCap x xa) + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. 
is_subject_asid aag v))) + (decode_asid_pool_invocation label args slot cap excaps)" + unfolding decode_asid_pool_invocation_def + apply (rule equiv_valid_guard_imp) + apply (subst gets_applyE)+ + apply (wp check_vp_wpR + reads_respects_f_inv'[OF get_asid_pool_rev] + gets_apply_ev + select_ext_ev_bind_lift + | wpc + | simp add: Let_def unlessE_whenE + | wp (once) whenE_throwError_wp)+ + apply (intro impI allI conjI) + apply (rule requiv_arm_asid_table_asid_high_bits_of_asid_eq') + apply fastforce + apply (simp add: reads_equiv_f_def) + apply blast + apply (fastforce simp: aag_cap_auth_ASIDPoolCap) + done + +lemma decode_asid_control_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and K (cap = ASIDControlCap) + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. is_subject_asid aag v))) + (decode_asid_control_invocation label args slot cap excaps)" + unfolding decode_asid_control_invocation_def + apply (rule equiv_valid_guard_imp) + apply (wp check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] + reads_respects_f_inv'[OF ensure_empty_rev] + reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] + reads_respects_f_inv'[OF ensure_no_children_rev] + reads_respects_f_inv'[OF lookup_error_on_failure_rev] + gets_apply_ev + is_final_cap_reads_respects + select_ext_ev_bind_lift + select_ext_ev_bind_lift[simplified] + | wpc + | simp add: Let_def unlessE_whenE + | wp (once) whenE_throwError_wp)+ + apply clarsimp + apply (prop_tac "excaps ! Suc 0 \ set excaps", fastforce) + apply (drule_tac x="excaps ! Suc 0" in bspec, assumption) + apply (frule_tac x="excaps ! 
Suc 0" in bspec, assumption) + apply (drule_tac x="excaps ! 0" in bspec, fastforce intro!: bang_0_in_set) + apply (intro impI allI conjI) + apply (fastforce intro: pas_cap_cur_auth_ASIDControlCap[where aag=aag] simp: reads_equiv_f_def) + apply fastforce + apply (fastforce intro: owns_cnode_owns_obj_ref_of_child_cnodes[where slot="snd (excaps ! (Suc 0))"]) + apply clarify + apply (rule_tac cap="fst (excaps ! Suc 0)" and p="snd (excaps ! Suc 0)" in caps_of_state_pasObjectAbs_eq) + apply (rule cte_wp_at_caps_of_state') + apply fastforce + apply (erule cap_auth_conferred_cnode_cap) + apply fastforce + apply assumption + apply fastforce + done + +(* FIXME AARCH64 IF: proof cleanup *) + +lemma + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows reads_respects_f_check_vspace_root[wp]: + "reads_respects_f aag l \ (check_vspace_root cap arg)" + unfolding check_vspace_root_def + by (rule equiv_valid_guard_imp, wpsimp+) + +lemma decode_frame_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and valid_arch_cap cap and K (cap = FrameCap p R sz dev m) + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. 
is_subject_asid aag v))) + (decode_frame_invocation label args slot cap excaps)" + unfolding decode_frame_invocation_def decode_fr_inv_map_def + check_vp_alignment_def gets_the_def + apply (rule gen_asm_ev)+ + supply gets_the_ev[wp del] + apply (case_tac "invocation_type label = ArchInvocationLabel ARMPageMap") + apply (clarsimp split del: if_split) + apply (rule equiv_valid_guard_imp) + apply ((wp gets_ev' check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] + reads_respects_f_inv'[OF ensure_empty_rev] + reads_respects_f_inv'[OF get_pte_rev] + reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] + reads_respects_f_inv'[OF ensure_no_children_rev] + reads_respects_f_inv'[OF lookup_error_on_failure_rev] + find_vspace_for_asid_reads_respects + is_final_cap_reads_respects + select_ext_ev_bind_lift + select_ext_ev_bind_lift[simplified] + | wpc + | simp add: Let_def unlessE_whenE + | wp (once) whenE_throwError_wp)+)[1] + apply clarsimp + apply (drule_tac x="excaps ! 0" in bspec, fastforce intro: bang_0_in_set)+ + apply clarsimp + apply (rule conjI) + apply (fastforce dest: cte_wp_valid_cap simp: valid_cap_def wellformed_mapdata_def) + apply clarsimp + apply (case_tac "m = None \ \ user_vtop < args ! 0 + mask (pageBitsForSize sz) \ (m = Some (asid, args ! 0))") + prefer 2 + apply clarsimp + apply (prop_tac "\ user_vtop < args ! 0 + mask (pageBitsForSize sz) \ args ! 0 \ user_region") + apply (clarsimp simp: user_region_def not_le) + apply (rule user_vtop_leq_canonical_user) + apply (simp add: vmsz_aligned_def not_less) + apply (drule is_aligned_no_overflow_mask) + apply simp + apply (prop_tac "args ! 0 \ user_region") + apply (fastforce simp: valid_arch_cap_def wellformed_mapdata_def) + apply (subgoal_tac "(\t. reads_equiv_f aag s t \ affects_equiv aag l s t \ + pt_lookup_slot pt (args ! 0) (ptes_of s) = pt_lookup_slot pt (args ! 
0) (ptes_of t))") + apply clarsimp + apply (clarsimp simp: reads_equiv_f_def) + apply (frule vspace_for_asid_vs_lookup) + apply (frule_tac pt=pt and level=max_pt_level and bot_level=0 in pt_walk_reads_equiv, + (fastforce dest: aag_has_Control_iff_owns + elim: vs_lookup_table_vref_independent + simp: aag_cap_auth_def cap_auth_conferred_def arch_cap_auth_conferred_def + pt_lookup_slot_def pt_lookup_slot_from_level_def obind_def + split: option.splits)+)[1] + apply (case_tac "invocation_type label = ArchInvocationLabel ARMPageUnmap") + apply wpsimp + apply (case_tac "invocation_type label = ArchInvocationLabel ARMPageGetAddress") + apply wpsimp + apply (case_tac "isPageFlushLabel (invocation_type label)") + prefer 2 + apply wpsimp + apply (clarsimp split del: if_split) + apply (unfold decode_fr_inv_flush_def) + apply (rule equiv_valid_guard_imp) + apply ((wp gets_ev' check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] + reads_respects_f_inv'[OF ensure_empty_rev] + reads_respects_f_inv'[OF get_pte_rev] + reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] + reads_respects_f_inv'[OF ensure_no_children_rev] + reads_respects_f_inv'[OF lookup_error_on_failure_rev] + find_vspace_for_asid_reads_respects + is_final_cap_reads_respects + select_ext_ev_bind_lift + select_ext_ev_bind_lift[simplified] + | wpc + | simp add: Let_def unlessE_whenE + | wp (once) whenE_throwError_wp)+)[1] + apply clarsimp + apply (clarsimp simp: valid_arch_cap_def wellformed_mapdata_def) + done + +lemma helper: + "reads_respects_f aag l (pt_at pt_t (table_base pt_t p) and K (is_subject aag (table_base pt_t p))) + (do m <- gets (swp ptes_of pt_t); + assert_opt (m p) + od)" + apply (auto simp: equiv_valid_def2 equiv_valid_2_def bind_def gets_def assert_opt_def fail_def + get_def return_def split: option.splits) + apply (drule ptes_of_reads_equiv) + apply (fastforce simp: reads_equiv_f_def) + apply clarsimp + done + +definition foldme where + "foldme level slot \ liftE $ do m <- gets (swp ptes_of 
(level_type level)); + assert_opt (m slot) + od" + +lemma decode_page_table_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and valid_arch_cap cap and K (cap = PageTableCap p pt_t m) + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. is_subject_asid aag v))) + (decode_page_table_invocation label args slot cap excaps)" + unfolding decode_page_table_invocation_def decode_pt_inv_map_def gets_the_def gets_map_def + supply gets_the_ev[wp del] + apply (rule equiv_valid_guard_imp) + apply (fold foldme_def) + apply ((wp gets_ev' check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] + reads_respects_f_inv'[OF ensure_empty_rev] + reads_respects_f_inv'[OF get_pte_rev] + reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] + reads_respects_f_inv'[OF ensure_no_children_rev] + reads_respects_f_inv'[OF lookup_error_on_failure_rev] + find_vspace_for_asid_reads_respects + is_final_cap_reads_respects + select_ext_ev_bind_lift + select_ext_ev_bind_lift[simplified] + | simp add: Let_def unlessE_whenE if_fun_split + | wpc + | wp (once) whenE_throwError_wp hoare_drop_imps)+)[1] + apply (unfold foldme_def) + apply simp + apply (wp (once)) + apply (rule helper) + apply ((wp gets_ev' check_vp_wpR reads_respects_f_inv'[OF get_asid_pool_rev] + reads_respects_f_inv'[OF ensure_empty_rev] + reads_respects_f_inv'[OF get_pte_rev] + reads_respects_f_inv'[OF lookup_slot_for_cnode_op_rev] + reads_respects_f_inv'[OF ensure_no_children_rev] + reads_respects_f_inv'[OF lookup_error_on_failure_rev] + find_vspace_for_asid_reads_respects + is_final_cap_reads_respects + select_ext_ev_bind_lift + 
select_ext_ev_bind_lift[simplified] + | simp add: Let_def unlessE_whenE if_fun_split + | wpc + | wp (once) whenE_throwError_wp hoare_drop_imps)+) + apply clarsimp + apply (rule conjI; clarsimp) + apply (drule_tac x="excaps ! 0" in bspec, fastforce intro: bang_0_in_set)+ + apply (prop_tac "args ! 0 \ user_region") + apply (clarsimp simp: user_region_def not_le) + apply (rule user_vtop_leq_canonical_user) + apply (simp add: vmsz_aligned_def not_less) + apply (clarsimp cong: conj_cong imp_cong) + apply (rule conjI) + apply (fastforce dest: cte_wp_valid_cap simp: valid_cap_def wellformed_mapdata_def) + apply (intro impI) + apply (rule context_conjI; clarsimp) + apply (clarsimp simp: reads_equiv_f_def) + apply (frule vspace_for_asid_vs_lookup) + apply (frule_tac pt=pt and level=max_pt_level and bot_level=0 in pt_walk_reads_equiv, + (fastforce dest: aag_has_Control_iff_owns + elim: vs_lookup_table_vref_independent + simp: aag_cap_auth_def cap_auth_conferred_def arch_cap_auth_conferred_def + pt_lookup_slot_def pt_lookup_slot_from_level_def obind_def + split: option.splits)+)[1] + + apply (rule conjI) + apply (frule (3) pt_lookup_slot_pte_at) + apply (clarsimp simp: pte_at_def2) + apply (frule vspace_for_asid_is_subject, fastforce+) + apply (clarsimp simp: pt_lookup_slot_def) + apply (erule pt_lookup_slot_from_level_is_subject) + apply fastforce+ + apply (fastforce dest: vspace_for_asid_vs_lookup vs_lookup_table_vref_independent) + apply clarsimp+ + apply (intro conjI) + apply fastforce + apply fastforce + apply fastforce + apply (fastforce dest: silc_inv_not_subject) + done + +lemma decode_vspace_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. 
cte_wp_at ((=) cap) slot s) + and valid_arch_cap cap + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. is_subject_asid aag v))) + (decode_vspace_invocation label args slot cap excaps)" + unfolding decode_vspace_invocation_def decode_vs_inv_flush_def + sorry + +lemma decode_vcpu_invocation_reads_respects_f: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and valid_arch_cap cap + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. is_subject_asid aag v))) + (decode_vcpu_invocation label args cap excaps)" + unfolding decode_vcpu_invocation_def + sorry + +lemma decode_sgi_signal_invocation_reads_respects_f[wp]: + "reads_respects_f aag l \ + (decode_sgi_signal_invocation (SGISignalCap x61 x62))" + unfolding decode_sgi_signal_invocation_def + by wpsimp + +lemma arch_decode_invocation_reads_respects_f[Decode_IF_assms]: + notes reads_respects_f_inv' = reads_respects_f_inv[where st=st] + notes whenE_wps[wp_split del] + shows + "reads_respects_f aag l + (silc_inv aag st and invs and pas_refined aag and cte_wp_at ((=) (cap.ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s) + and K (\(cap, slot) \ {(cap.ArchObjectCap cap, slot)} \ set excaps. + aag_cap_auth aag (pasObjectAbs aag (fst slot)) cap \ + is_subject aag (fst slot) \ + (\v \ cap_asid' cap. 
is_subject_asid aag v))) + (arch_decode_invocation label args cap_index slot cap excaps)" + unfolding arch_decode_invocation_def + apply (cases cap; clarsimp; rule equiv_valid_guard_imp) + by (wpsimp wp: decode_asid_pool_invocation_reads_respects_f + decode_asid_control_invocation_reads_respects_f + decode_frame_invocation_reads_respects_f + decode_vspace_invocation_reads_respects_f + decode_vcpu_invocation_reads_respects_f + decode_page_table_invocation_reads_respects_f + | fastforce dest: caps_of_state_valid cte_wp_at_caps_of_state' + simp: valid_cap_def valid_arch_cap_def)+ + +end + + +global_interpretation Decode_IF_2?: Decode_IF_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Decode_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchFinalCaps.thy b/proof/infoflow/AARCH64/ArchFinalCaps.thy new file mode 100644 index 0000000000..f75bf68eda --- /dev/null +++ b/proof/infoflow/AARCH64/ArchFinalCaps.thy @@ -0,0 +1,461 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchFinalCaps +imports FinalCaps +begin + +context Arch begin global_naming AARCH64 + +named_theorems FinalCaps_assms + +lemma FIXME_arch_gen_refs[FinalCaps_assms]: + "arch_gen_refs cap = {}" + by (clarsimp simp: arch_cap_set_map_def arch_gen_obj_refs_def split: cap.splits) + +lemma aobj_ref_same_aobject[FinalCaps_assms]: + "same_aobject_as cp cp' \ aobj_ref cp = aobj_ref cp'" + by (cases cp; cases cp'; clarsimp) + +lemma set_pt_silc_inv[wp]: + "set_pt ptr pt \silc_inv aag st\" + unfolding set_pt_def + apply (rule silc_inv_pres) + apply (wpsimp wp: set_object_wp_strong simp: a_type_def split: kernel_object.splits) + apply (fastforce simp: silc_inv_def obj_at_def is_cap_table_def) + apply (wp set_object_wp get_object_wp | simp)+ + apply (case_tac "ptr = fst slot") + apply (clarsimp split: kernel_object.splits) + apply (fastforce elim: cte_wp_atE simp: obj_at_def) + apply (fastforce 
elim: cte_wp_atE intro: cte_wp_at_cteI cte_wp_at_tcbI) + done + +lemma set_asid_pool_silc_inv[wp]: + "set_asid_pool ptr pool \silc_inv aag st\" + unfolding set_asid_pool_def + apply (rule silc_inv_pres) + apply (wpsimp wp: set_object_wp_strong simp: a_type_def split: kernel_object.splits) + apply (fastforce simp: silc_inv_def obj_at_def is_cap_table_def) + apply (wp set_object_wp get_object_wp | simp)+ + apply (case_tac "ptr = fst slot") + apply (clarsimp split: kernel_object.splits) + apply (fastforce elim: cte_wp_atE simp: obj_at_def) + apply (fastforce elim: cte_wp_atE intro: cte_wp_at_cteI cte_wp_at_tcbI) + done + +lemma set_vcpu_silc_inv: + "\silc_inv aag st\ + set_vcpu ptr vcpu + \\_. silc_inv aag st\" + unfolding set_vcpu_def + apply (rule silc_inv_pres) + apply (wpsimp wp: set_object_wp_strong get_object_wp simp: obj_at_def) + apply (drule (1) silc_inv_cnode_only) + apply (fastforce simp: silc_inv_def obj_at_def is_cap_table_def split: kernel_object.splits) + apply (wpsimp wp: set_object_wp get_object_wp) + apply (wpsimp wp: set_object_wp_strong get_object_wp simp: obj_at_def) + apply (case_tac "ptr = fst (a,b)") + apply (fastforce elim: cte_wp_atE simp: obj_at_def) + apply (fastforce elim: cte_wp_atE intro: cte_wp_at_cteI cte_wp_at_tcbI) + done + +crunch vcpu_switch + for silc_inv[wp]: "silc_inv aag st" + (wp: mapM_x_wp_inv mapM_wp_inv) + +crunch associate_vcpu_tcb + for silc_inv[wp]: "silc_inv aag st" + (wp: crunch_wps simp: arch_thread_set_is_thread_set) + +crunch arch_finalise_cap, prepare_thread_delete + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + (wp: crunch_wps modify_wp simp: crunch_simps ignore: set_object) + +crunch init_arch_objects + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + (wp: crunch_wps modify_wp simp: crunch_simps ignore: set_object) + +crunch handle_vm_fault, handle_arch_fault_reply, arch_invoke_irq_handler, arch_mask_irq_signal, + arch_post_cap_deletion, arch_post_modify_registers, arch_activate_idle_thread, + 
arch_switch_to_idle_thread, arch_switch_to_thread + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + +lemma arch_derive_cap_silc[FinalCaps_assms]: + "\\s. cap = ArchObjectCap acap \ + (\ cap_points_to_label aag cap l \ R (slots_holding_overlapping_caps cap s))\ + arch_derive_cap acap + \\cap' s. \ cap_points_to_label aag cap' l \ R (slots_holding_overlapping_caps cap' s)\, -" + apply (simp add: arch_derive_cap_def) + apply wpsimp + apply (auto simp: cap_points_to_label_def slots_holding_overlapping_caps_def) + done + +declare init_arch_objects_cte_wp_at[FinalCaps_assms] +declare handle_vm_fault_cur_thread[FinalCaps_assms] +declare finalise_cap_makes_halted[FinalCaps_assms] + +end + + +global_interpretation FinalCaps_1?: FinalCaps_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact FinalCaps_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma perform_page_table_invocation_silc_inv_get_cap_helper: + "\silc_inv aag st and cte_wp_at (is_pt_cap or is_frame_cap) xa\ + get_cap xa + \(\capa s. (\ cap_points_to_label aag (ArchObjectCap $ update_map_data capa None) + (pasObjectAbs aag (fst xa)) + \ (\lslot. 
lslot \ slots_holding_overlapping_caps + (ArchObjectCap $ update_map_data capa None) s \ + pasObjectAbs aag (fst lslot) = SilcLabel))) \ the_arch_cap\" + apply (wp get_cap_wp) + apply clarsimp + apply (drule cte_wp_at_norm) + apply (clarify) + apply (drule (1) cte_wp_at_eqD2) + apply (case_tac cap, simp_all add: is_frame_cap_def is_pt_cap_def) + apply (clarsimp simp: cap_points_to_label_def update_map_data_def split: arch_cap.splits) + apply (drule silc_invD) + apply assumption + apply (fastforce simp: intra_label_cap_def cap_points_to_label_def) + apply (fastforce simp: slots_holding_overlapping_caps_def2 ctes_wp_at_def) + apply (drule silc_invD) + apply assumption + apply (fastforce simp: intra_label_cap_def cap_points_to_label_def) + apply (fastforce simp: slots_holding_overlapping_caps_def2 ctes_wp_at_def) + done + +lemmas perform_page_table_invocation_silc_inv_get_cap_helper' = + perform_page_table_invocation_silc_inv_get_cap_helper[simplified o_def fun_app_def] + +crunch store_pte + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + +lemma mapM_x_swp_store_pte_silc_inv[wp]: + "mapM_x (swp (store_pte pt_t) A) slots \silc_inv aag st\" + by (wp mapM_x_wp[OF _ subset_refl] | simp add: swp_def)+ + +lemma is_arch_eq_pt_is_pt_or_frame_cap: + "cte_wp_at ((=) (ArchObjectCap (PageTableCap pt_t xa xb))) slot s + \ cte_wp_at (\a. is_pt_cap a \ is_frame_cap a) slot s" + apply (erule cte_wp_at_weakenE) + by (clarsimp simp: is_frame_cap_def is_pt_cap_def) + +lemma is_arch_eq_pg_is_pt_or_pg_cap: + "cte_wp_at ((=) (ArchObjectCap (FrameCap xa xb xc xd xe))) slot s + \ cte_wp_at (\a. is_pt_cap a \ is_frame_cap a) slot s" + apply (erule cte_wp_at_weakenE) + by (clarsimp simp: is_frame_cap_def is_pt_cap_def) + +crunch unmap_page_table + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + +lemma perform_page_table_invocation_silc_inv: + "\silc_inv aag st and valid_pti blah and K (authorised_page_table_inv aag blah)\ + perform_page_table_invocation blah + \\_. 
silc_inv aag st\" + unfolding perform_page_table_invocation_def perform_pt_inv_map_def perform_pt_inv_unmap_def + apply (rule hoare_pre) + apply (wp set_cap_silc_inv mapM_x_wp[OF _ subset_refl] + perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] + | wpc | simp only: o_def fun_app_def K_def swp_def)+ + apply (clarsimp simp: valid_pti_def authorised_page_table_inv_def + split: page_table_invocation.splits) + apply (rule conjI) + apply (clarsimp) + defer + apply (fastforce simp: silc_inv_def) + apply (fastforce dest: is_arch_eq_pt_is_pt_or_frame_cap + simp: silc_inv_def is_PageTableCap_def pred_disj_def) + apply (drule_tac slot="(aa,ba)" in overlapping_slots_have_labelled_overlapping_caps[rotated]) + apply (fastforce) + apply (fastforce elim: is_arch_update_overlaps[rotated] cte_wp_at_weakenE) + apply fastforce + done + +crunch invalidate_tlb_by_asid_va, perform_flush + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + +crunch unmap_page + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + (simp: crunch_simps) + +lemma perform_page_invocation_silc_inv: + "\silc_inv aag st and valid_page_inv blah and authorised_page_inv aag blah\ + perform_page_invocation blah + \\_. 
silc_inv aag st\" + unfolding perform_page_invocation_def perform_pg_inv_map_def perform_pg_inv_unmap_def perform_pg_inv_get_addr_def + apply (rule hoare_pre) + apply (wp mapM_wp[OF _ subset_refl] set_cap_silc_inv + mapM_x_wp[OF _ subset_refl] + perform_page_table_invocation_silc_inv_get_cap_helper'[where st=st] + hoare_vcg_all_lift hoare_vcg_if_lift hoare_weak_lift_imp + | wpc + | simp only: swp_def o_def fun_app_def K_def + | wp (once) hoare_drop_imps)+ + apply (clarsimp simp: valid_page_inv_def authorised_page_inv_def + split: page_invocation.splits) + apply (intro allI impI conjI) + apply (drule_tac slot="(ac,bb)" in overlapping_slots_have_labelled_overlapping_caps[rotated]) + apply (fastforce)+ + apply (fastforce elim: is_arch_update_overlaps[rotated] cte_wp_at_weakenE) + apply fastforce+ + apply (fastforce simp: silc_inv_def) + apply (drule_tac slot="(ac,bb)" in overlapping_slots_have_labelled_overlapping_caps[rotated]) + apply (fastforce)+ + apply (fastforce elim: is_arch_update_overlaps[rotated] cte_wp_at_weakenE) + apply fastforce+ + apply (fastforce simp: silc_inv_def) + apply (fastforce dest: is_arch_eq_pg_is_pt_or_pg_cap simp: silc_inv_def pred_disj_def) + done + +lemma perform_asid_control_invocation_silc_inv: + notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + shows + "\silc_inv aag st and valid_aci blah and invs and K (authorised_asid_control_inv aag blah)\ + perform_asid_control_invocation blah + \\_. 
silc_inv aag st\" + apply (rule hoare_gen_asm) + unfolding perform_asid_control_invocation_def + apply (rule hoare_pre) + apply (wp modify_wp cap_insert_silc_inv' retype_region_silc_inv[where sz=pageBits] + set_cap_silc_inv get_cap_slots_holding_overlapping_caps[where st=st] + delete_objects_silc_inv hoare_weak_lift_imp + | wpc | simp )+ + apply (clarsimp simp: authorised_asid_control_inv_def silc_inv_def valid_aci_def ptr_range_def) + apply (rule conjI) + apply (clarsimp simp: range_cover_def obj_bits_api_def default_arch_object_def asid_bits_def pageBits_def) + apply (rule of_nat_inverse) + apply simp + apply (drule is_aligned_neg_mask_eq'[THEN iffD1, THEN sym]) + apply (erule_tac t=x in ssubst) + apply (simp add: mask_AND_NOT_mask) + apply simp + apply (simp add: p_assoc_help) + apply (clarsimp simp: cap_points_to_label_def) + apply (erule bspec) + apply (fastforce intro: is_aligned_no_wrap' simp: blah) + done + +crunch store_asid_pool_entry, handle_spurious_irq + for silc_inv[wp]: "silc_inv aag st" + +lemma perform_asid_pool_invocation_silc_inv: + "\silc_inv aag st and K (authorised_asid_pool_inv aag blah)\ + perform_asid_pool_invocation blah + \\_. silc_inv aag st\" + unfolding perform_asid_pool_invocation_def + apply (wpsimp wp: set_cap_silc_inv get_cap_wp)+ + apply (fastforce dest: silc_invD + simp: intra_label_cap_def cap_points_to_label_def silc_inv_def + slots_holding_overlapping_caps_def authorised_asid_pool_inv_def + is_ArchObjectCap_def is_PageTableCap_def update_map_data_def)+ + done + +crunch perform_vcpu_invocation, perform_vspace_invocation, perform_sgi_invocation + for silc_inv[wp]: "silc_inv aag st" + +declare handle_spurious_irq_silc_inv[wp, FinalCaps_assms] + +lemma arch_perform_invocation_silc_inv[FinalCaps_assms]: + "\silc_inv aag st and invs and valid_arch_inv ai and authorised_arch_inv aag ai\ + arch_perform_invocation ai + \\_. 
silc_inv aag st\" + unfolding arch_perform_invocation_def + apply (rule hoare_pre) + apply (wp perform_page_table_invocation_silc_inv + perform_page_invocation_silc_inv + perform_asid_control_invocation_silc_inv + perform_asid_pool_invocation_silc_inv + perform_vcpu_invocation_silc_inv + | wpc)+ + apply (clarsimp simp: authorised_arch_inv_def valid_arch_inv_def split: arch_invocation.splits) + done + +lemma new_irq_handler_caps_are_intra_label: + "\ cte_wp_at ((=) (IRQControlCap)) slot s; pas_refined aag s; is_subject aag (fst slot) \ + \ cap_points_to_label aag (IRQHandlerCap irq) (pasSubject aag)" + apply (clarsimp simp: cap_points_to_label_def) + apply (frule cap_cur_auth_caps_of_state[rotated]) + apply assumption + apply (simp add: cte_wp_at_caps_of_state) + apply (clarsimp simp: aag_cap_auth_def cap_links_irq_def) + apply (blast intro: aag_Control_into_owns_irq) + done + +lemma arch_invoke_irq_control_silc_inv[FinalCaps_assms]: + "\silc_inv aag st and pas_refined aag and arch_irq_control_inv_valid arch_irq_cinv + and K (arch_authorised_irq_ctl_inv aag arch_irq_cinv)\ + arch_invoke_irq_control arch_irq_cinv + \\_. silc_inv aag st\" + unfolding arch_authorised_irq_ctl_inv_def + apply (rule hoare_gen_asm) + apply (case_tac arch_irq_cinv) + apply (wp cap_insert_silc_inv'' hoare_vcg_ex_lift slots_holding_overlapping_caps_lift + | simp add: authorised_irq_ctl_inv_def arch_irq_control_inv_valid_def)+ + apply (fastforce dest: new_irq_handler_caps_are_intra_label) + apply (wpsimp wp: cap_insert_silc_inv'' simp: cap_points_to_label_def) + done + +crunch set_priority, set_flags + for silc_inv[wp]: "silc_inv aag st" + (simp: tcb_cap_cases_def) + +crunch arch_prepare_set_domain, arch_prepare_next_domain, arch_post_set_flags + for silc_inv[FinalCaps_assms, wp]: "silc_inv aag st" + (wp: crunch_wps) + + +lemma tcb_cap_cases_tcb_fault: + "\(getF, a, b)\ran tcb_cap_cases. 
+ getF (tcb_fault_update F tcb) = getF tcb" + by (rule ball_tcb_cap_casesI, simp+) + +lemma case_option_wp_returnOk: + assumes [wp]: "\x. \P x\ f x \\_. Q\" + shows "\Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ returnOk rv | Some x \ f x) + \\_. Q\" + by (cases opt; wpsimp) + +lemma case_option_wp_return: + assumes [wp]: "\x. \P x\ f x \\_. Q\" + shows "\Q and (\s. opt \ None \ P (the opt) s)\ + (case opt of None \ return rv | Some x \ f x) + \\_. Q\" + by (cases opt; wpsimp) + +lemma invoke_tcb_silc_inv[FinalCaps_assms]: + notes hoare_weak_lift_imp [wp] + hoare_weak_lift_imp_conj [wp] + shows "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and tcb_inv_wf tinv + and K (authorised_tcb_inv aag tinv)\ + invoke_tcb tinv + \\_. silc_inv aag st\" + apply (case_tac tinv) + apply ((wp restart_silc_inv hoare_vcg_if_lift suspend_silc_inv mapM_x_wp[OF _ subset_refl] + hoare_weak_lift_imp + | wpc + | simp split del: if_split add: authorised_tcb_inv_def check_cap_at_def + | clarsimp + | strengthen invs_mdb + | force intro: notE[rotated,OF idle_no_ex_cap,simplified])+)[3] + defer + apply ((wp suspend_silc_inv restart_silc_inv | simp add: authorised_tcb_inv_def | force)+)[2] + (* NotificationControl *) + apply (rename_tac option) + apply (case_tac option; (wp | simp)+) + (* SetTLSBase *) + apply (wpsimp split: option.splits) + (* SetFlags *) + apply (wpsimp split: option.splits) + (* just ThreadControl left *) + apply (simp add: split_def cong: option.case_cong) + (* slow, ~2 mins *) + apply (strengthen use_no_cap_to_obj_asid_strg + | clarsimp + | simp only: conj_ac cong: conj_cong imp_cong + | wp case_option_wp_returnOk case_option_wp_return + checked_insert_pas_refined checked_cap_insert_silc_inv hoare_vcg_all_liftE_R + hoare_vcg_all_lift hoare_vcg_const_imp_liftE_R + cap_delete_silc_inv_not_transferable + cap_delete_pas_refined' cap_delete_deletes + cap_delete_valid_cap cap_delete_cte_at + check_cap_inv[where P="valid_cap c" for c] + 
check_cap_inv[where P="cte_at p0" for p0] + check_cap_inv[where P="\s. \ tcb_at t s" for t] + check_cap_inv2[where Q="\_. valid_list"] + check_cap_inv2[where Q="\_. valid_sched"] + check_cap_inv2[where Q="\_. simple_sched_action"] + checked_insert_no_cap_to + thread_set_tcb_fault_handler_update_invs + thread_set_pas_refined thread_set_emptyable thread_set_valid_cap + thread_set_not_state_valid_sched thread_set_cte_at + thread_set_no_cap_to_trivial + | wpc + | simp add: emptyable_def tcb_cap_cases_def tcb_cap_valid_def + st_tcb_at_triv option_update_thread_def + | strengthen use_no_cap_to_obj_asid_strg invs_mdb + invs_psp_aligned invs_vspace_objs invs_arch_state + | wp (once) hoare_drop_imps + | elim disjE; solves clarsimp)+ + (* also slow, ~30s *) + prefer 1 + apply (clarsimp simp: is_cap_simps) + apply (clarsimp split: option.split_asm) + apply (clarsimp simp: is_cap_simps is_cnode_or_valid_arch_def is_valid_vtable_root_def + authorised_tcb_inv_def emptyable_def + split: cap.splits option.splits pt_type.splits arch_cap.splits)+ + done + +end + + +global_interpretation FinalCaps_2?: FinalCaps_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact FinalCaps_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma handle_hypervisor_fault_silc_inv[FinalCaps_assms]: + "\silc_inv aag st and invs and pas_refined aag and is_subject aag o cur_thread and K (is_subject aag t)\ + handle_hypervisor_fault t ex + \\_. silc_inv aag st\" + apply (case_tac ex; clarsimp split del: if_split) + apply (wpsimp wp: handle_fault_silc_inv simp: valid_fault_def) + done + +lemma vppi_event_silc_inv: + "\silc_inv aag st and invs and pas_refined aag and (\s. ct_active s \ is_subject aag (cur_thread s))\ + vppi_event irq + \\_. 
silc_inv aag st\" + unfolding vppi_event_def + apply (wpsimp wp: gts_wp hoare_vcg_all_lift vcpu_update_trivial_invs maskInterrupt_invs + hoare_vcg_imp_lift | wps | wp dmo_wp)+ + apply (clarsimp simp: valid_fault_def) + using ct_active_st_tcb_at_weaken runnable_eq by blast + +lemma vgic_maintenance_silc_inv: + "\silc_inv aag st and invs and pas_refined aag and (\s. ct_active s \ is_subject aag (cur_thread s))\ + vgic_maintenance + \\_. silc_inv aag st\" + unfolding vgic_maintenance_def + apply (wpsimp wp: gts_wp hoare_vcg_all_lift hoare_weak_lift_imp dmo_invs_lift + simp: crunch_simps valid_fault_def split_del: if_split + | wps | wp (once) hoare_drop_imps)+ + using ct_active_st_tcb_at_weaken runnable_eq by blast + +lemma handle_reserved_irq_silc_inv[FinalCaps_assms]: + "\silc_inv aag st and invs and pas_refined aag and (\s. ct_active s \ is_subject aag (cur_thread s))\ + handle_reserved_irq irq + \\_. silc_inv aag st\" + unfolding handle_reserved_irq_def + by (cases "irq = irqVGICMaintenance"; wpsimp wp: vgic_maintenance_silc_inv vppi_event_silc_inv) + +end + + +global_interpretation FinalCaps_3?: FinalCaps_3 +proof goal_cases + interpret Arch . + case 1 show ?case + apply (unfold_locales; (fact FinalCaps_assms)?) 
sorry (* FIXME AARCH64 IF: the FinalCaps_3 locale assumptions cannot yet be discharged for AARCH64 via FinalCaps_assms; weaken the locale assumptions before completing this interpretation *)
halted_if_tcb t s\" + by (wpsimp simp: arch_finalise_cap_def) + +(* FIXME: move *) +lemma set_object_modifies_at_most: + "modifies_at_most aag {pasObjectAbs aag ptr} + (\s. \ asid_pool_at ptr s \ (\asid_pool. obj \ ArchObj (ASIDPool asid_pool))) + (set_object ptr obj)" + apply (rule modifies_at_mostI) + apply (wp set_object_equiv_but_for_labels) + apply clarsimp + done + +lemma set_thread_state_reads_respects[Finalise_IF_assms]: + assumes domains_distinct: "pas_domains_distinct aag" + shows "reads_respects aag l (\s. is_subject aag (cur_thread s)) (set_thread_state ref ts)" + unfolding set_thread_state_def fun_app_def + apply (simp add: bind_assoc[symmetric]) + apply (rule pre_ev) + apply (rule_tac P'=\ in bind_ev) + apply (rule set_thread_state_act_reads_respects) + apply (case_tac "aag_can_read aag ref \ aag_can_affect aag l ref") + apply (wp set_object_reads_respects gets_the_ev) + apply (fastforce simp: get_tcb_def split: option.splits + elim: reads_equivE affects_equivE equiv_forE) + apply (simp add: equiv_valid_def2) + apply (rule equiv_valid_rv_bind) + apply (rule equiv_valid_rv_trivial) + apply (wp | simp)+ + apply (rule_tac P=\ and P'=\ and L="{pasObjectAbs aag ref}" and L'="{pasObjectAbs aag ref}" + in ev2_invisible[OF domains_distinct]) + apply (blast | simp add: labels_are_invisible_def)+ + apply (rule set_object_modifies_at_most) + apply (rule set_object_modifies_at_most) + apply (simp | wp)+ + apply (blast dest: get_tcb_not_asid_pool_at) + apply (subst thread_set_def[symmetric, simplified fun_app_def]) + apply (wp | simp)+ + done + +lemma set_thread_state_runnable_reads_respects[Finalise_IF_assms]: + assumes domains_distinct: "pas_domains_distinct aag" + shows "runnable ts \ reads_respects aag l \ (set_thread_state ref ts)" + unfolding set_thread_state_def fun_app_def + apply (simp add: bind_assoc[symmetric]) + apply (rule pre_ev) + apply (rule_tac P'=\ in bind_ev) + apply (rule set_thread_state_act_runnable_reads_respects) + apply (case_tac 
"aag_can_read aag ref \ aag_can_affect aag l ref") + apply (wp set_object_reads_respects gets_the_ev) + apply (fastforce simp: get_tcb_def split: option.splits elim: reads_equivE affects_equivE equiv_forE) + apply (simp add: equiv_valid_def2) + apply (rule equiv_valid_rv_bind) + apply (rule equiv_valid_rv_trivial) + apply (wp | simp)+ + apply (rule_tac P=\ and P'=\ and L="{pasObjectAbs aag ref}" and L'="{pasObjectAbs aag ref}" + in ev2_invisible[OF domains_distinct]) + apply (blast | simp add: labels_are_invisible_def)+ + apply (rule set_object_modifies_at_most) + apply (rule set_object_modifies_at_most) + apply (simp | wp)+ + apply (blast dest: get_tcb_not_asid_pool_at) + apply (subst thread_set_def[symmetric, simplified fun_app_def]) + apply (wp thread_set_st_tcb_at | simp)+ + done + +lemma set_bound_notification_none_reads_respects[Finalise_IF_assms]: + assumes domains_distinct: "pas_domains_distinct aag" + shows "reads_respects aag l \ (set_bound_notification ref None)" + unfolding set_bound_notification_def fun_app_def + apply (rule pre_ev(5)[where Q=\]) + apply (case_tac "aag_can_read aag ref \ aag_can_affect aag l ref") + apply (wp set_object_reads_respects gets_the_ev)[1] + apply (fastforce simp: get_tcb_def split: option.splits elim: reads_equivE affects_equivE equiv_forE) + apply (simp add: equiv_valid_def2) + apply (rule equiv_valid_rv_bind) + apply (rule equiv_valid_rv_trivial) + apply (wp | simp)+ + apply (rule_tac P=\ and P'=\ and L="{pasObjectAbs aag ref}" and L'="{pasObjectAbs aag ref}" + in ev2_invisible[OF domains_distinct]) + apply (blast | simp add: labels_are_invisible_def)+ + apply (rule set_object_modifies_at_most) + apply (rule set_object_modifies_at_most) + apply (simp | wp)+ + apply (blast dest: get_tcb_not_asid_pool_at) + apply simp + done + +lemma set_tcb_queue_reads_respects[Finalise_IF_assms, wp]: + "reads_respects aag l \ (set_tcb_queue d prio queue)" + unfolding equiv_valid_def2 equiv_valid_2_def + apply (clarsimp simp: 
set_tcb_queue_def bind_def modify_def put_def get_def) + apply (rule conjI) + apply (rule reads_equiv_ready_queues_update, assumption) + apply (fastforce simp: reads_equiv_def affects_equiv_def states_equiv_for_def equiv_for_def) + apply (rule affects_equiv_ready_queues_update, assumption) + apply (clarsimp simp: reads_equiv_def affects_equiv_def states_equiv_for_def equiv_for_def + equiv_asids_def equiv_asid_def) + apply (rule ext) + apply force + done + +lemma set_tcb_queue_modifies_at_most: + "modifies_at_most aag L (\s. pasDomainAbs aag d \ L \ {}) (set_tcb_queue d prio queue)" + apply (rule modifies_at_mostI) + apply (simp add: set_tcb_queue_def modify_def, wp) + apply (force simp: equiv_but_for_labels_def states_equiv_for_def equiv_for_def equiv_asids_def) + done + +lemma set_notification_equiv_but_for_labels[Finalise_IF_assms]: + "\equiv_but_for_labels aag L st and K (pasObjectAbs aag ntfnptr \ L)\ + set_notification ntfnptr ntfn + \\_. equiv_but_for_labels aag L st\" + unfolding set_simple_ko_def + apply (wp set_object_equiv_but_for_labels get_object_wp) + apply (clarsimp simp: asid_pool_at_kheap partial_inv_def obj_at_def split: kernel_object.splits) + done + +lemma thread_set_reads_respects[Finalise_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l \ (thread_set x y)" + unfolding thread_set_def fun_app_def + apply (case_tac "aag_can_read aag y \ aag_can_affect aag l y") + apply (wp set_object_reads_respects) + apply (clarsimp, rule reads_affects_equiv_get_tcb_eq, simp+)[1] + apply (simp add: equiv_valid_def2) + apply (rule equiv_valid_rv_guard_imp) + apply (rule_tac L="{pasObjectAbs aag y}" and L'="{pasObjectAbs aag y}" + in ev2_invisible[OF domains_distinct]) + apply (assumption | simp add: labels_are_invisible_def)+ + apply (rule modifies_at_mostI[where P="\"] + | wp set_object_equiv_but_for_labels + | simp + | (clarify, drule get_tcb_not_asid_pool_at))+ + done + +lemma aag_cap_auth_ASIDPoolCap: + 
"pas_cap_cur_auth aag (ArchObjectCap (ASIDPoolCap r asid)) \ + pas_refined aag s \ is_subject aag r" + unfolding aag_cap_auth_def + by (simp add: clas_no_asid cap_auth_conferred_def arch_cap_auth_conferred_def + cli_no_irqs pas_refined_all_auth_is_owns) + +lemma aag_cap_auth_PageDirectory: + "pas_cap_cur_auth aag (ArchObjectCap (PageTableCap word pt_t (Some a))) \ + pas_refined aag s \ is_subject aag word" + unfolding aag_cap_auth_def + by (simp add: clas_no_asid cap_auth_conferred_def arch_cap_auth_conferred_def + cli_no_irqs pas_refined_all_auth_is_owns) + +lemma aag_cap_auth_ASIDPoolCap_asid: + "\ pas_cap_cur_auth aag (ArchObjectCap (ASIDPoolCap r asid)); asid' \ 0; + asid_high_bits_of asid' = asid_high_bits_of asid; pas_refined aag s \ + \ is_subject_asid aag asid'" + apply (frule (1) aag_cap_auth_ASIDPoolCap) + apply (unfold aag_cap_auth_def) + apply (rule is_subject_into_is_subject_asid) + apply auto + done + +lemma aag_cap_auth_PageCap_asid: + "\ pas_cap_cur_auth aag (ArchObjectCap (FrameCap dev ref r sz (Some (a, b)))); pas_refined aag s \ + \ is_subject_asid aag a" + by (auto simp: aag_cap_auth_def cap_links_asid_slot_def label_owns_asid_slot_def + intro: pas_refined_Control_into_is_subject_asid) + +lemma aag_cap_auth_PageTableCap: + "\ pas_cap_cur_auth aag (ArchObjectCap (PageTableCap word pt_t option)); pas_refined aag s \ + \ is_subject aag word" + unfolding aag_cap_auth_def + by (simp add: clas_no_asid cap_auth_conferred_def arch_cap_auth_conferred_def + cli_no_irqs pas_refined_all_auth_is_owns) + +lemma aag_cap_auth_PageTableCap_asid: + "\ pas_cap_cur_auth aag (ArchObjectCap (PageTableCap word pt_t (Some (a, b)))); pas_refined aag s \ + \ is_subject_asid aag a" + by (auto simp: aag_cap_auth_def cap_links_asid_slot_def label_owns_asid_slot_def + intro: pas_refined_Control_into_is_subject_asid) + +lemma aag_cap_auth_PageDirectoryCap: + "\ pas_cap_cur_auth aag (ArchObjectCap (PageTableCap word pt_t option)); pas_refined aag s \ + \ is_subject aag word" + 
unfolding aag_cap_auth_def + by (simp add: clas_no_asid cap_auth_conferred_def arch_cap_auth_conferred_def + cli_no_irqs pas_refined_all_auth_is_owns) + +lemma aag_cap_auth_PageDirectoryCap_asid: + "\ pas_cap_cur_auth aag (ArchObjectCap (PageTableCap word pt_t (Some (a,vref)))); pas_refined aag s \ + \ is_subject_asid aag a" + unfolding aag_cap_auth_def + by (auto simp: cap_links_asid_slot_def label_owns_asid_slot_def + intro: pas_refined_Control_into_is_subject_asid) + +lemmas aag_cap_auth_subject = aag_cap_auth_ASIDPoolCap_asid + aag_cap_auth_PageCap_asid + aag_cap_auth_PageTableCap_asid + +lemma prepare_thread_delete_reads_respects_f[Finalise_IF_assms]: + "reads_respects_f aag l \ (prepare_thread_delete thread)" + unfolding prepare_thread_delete_def + apply wp + sorry + +lemma arch_finalise_cap_reads_respects[Finalise_IF_assms]: + "reads_respects aag l (pas_refined aag and invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and K (pas_cap_cur_auth aag (ArchObjectCap cap))) + (arch_finalise_cap cap final)" + unfolding arch_finalise_cap_def + apply (rule gen_asm_ev) + apply (case_tac cap) + apply simp + apply (simp split: bool.splits) + apply (intro impI conjI) + apply (wp delete_asid_pool_reads_respects unmap_page_reads_respects unmap_page_table_reads_respects + delete_asid_reads_respects find_vspace_for_asid_reads_respects + | simp add: invs_psp_aligned invs_vspace_objs invs_valid_objs valid_cap_def + valid_arch_state_asid_table invs_arch_state wellformed_mapdata_def + split: option.splits bool.splits pt_type.splits + | intro impI conjI allI + | elim conjE + | drule cte_wp_valid_cap + | fastforce dest: aag_can_read_own_asids aag_cap_auth_subject)+ + sorry + +(*NOTE: Required to dance around the issue of the base potentially + being zero and thus we can't conclude it is in the current subject.*) +lemma requiv_arm_asid_table_asid_high_bits_of_asid_eq': + "\ pas_cap_cur_auth aag (ArchObjectCap (ASIDPoolCap p b)); reads_equiv aag s t; pas_refined aag x \ + \ 
asid_table s (asid_high_bits_of b) = + asid_table t (asid_high_bits_of b)" + apply (subgoal_tac "asid_high_bits_of 0 = asid_high_bits_of 1") + apply (case_tac "b = 0") + apply (subgoal_tac "is_subject_asid aag 1") + apply ((fastforce intro: requiv_arm_asid_table_asid_high_bits_of_asid_eq + aag_cap_auth_ASIDPoolCap_asid)+)[2] + apply (auto intro: requiv_arm_asid_table_asid_high_bits_of_asid_eq + aag_cap_auth_ASIDPoolCap_asid)[1] + apply (simp add: asid_high_bits_of_def asid_low_bits_def) + done + +lemma pt_cap_aligned: + "\ caps_of_state s p = Some (ArchObjectCap (PageTableCap word pt_t x)); valid_caps (caps_of_state s) s \ + \ is_aligned word (pt_bits pt_t)" + by (auto simp: obj_ref_of_def pt_bits_def pageBits_def + dest!: cap_aligned_valid[OF valid_capsD, unfolded cap_aligned_def, THEN conjunct1]) + +lemma maskInterrupt_no_mem: + "maskInterrupt a b \\ms. P (underlying_memory ms)\" + by (wpsimp simp: maskInterrupt_def) + +lemma set_irq_state_valid_global_objs: + "set_irq_state state irq \valid_global_objs\" + apply (simp add: set_irq_state_def) + apply (wp modify_wp) + apply (fastforce simp: valid_global_objs_def) + done + +lemma set_irq_state_globals_equiv[Finalise_IF_assms]: + "set_irq_state state irq \globals_equiv st\" + apply (simp add: set_irq_state_def) + apply (wp dmo_no_mem_globals_equiv maskInterrupt_no_mem modify_wp) + apply (simp add: globals_equiv_interrupt_states_update) + done + +lemma set_notification_globals_equiv[Finalise_IF_assms]: + "\globals_equiv st and valid_arch_state\ + set_notification ptr ntfn + \\_. globals_equiv st\" + unfolding set_simple_ko_def + apply (wp set_object_globals_equiv get_object_wp) + apply (fastforce simp: obj_at_def valid_arch_state_def dest: valid_global_arch_objs_pt_at) + done + +lemma delete_asid_globals_equiv: + "\globals_equiv st and valid_arch_state\ + delete_asid asid pt + \\_. 
globals_equiv st\" + unfolding delete_asid_def + apply (wpsimp wp: set_vm_root_globals_equiv set_asid_pool_globals_equiv) + sorry + +lemma arch_finalise_cap_globals_equiv[Finalise_IF_assms]: + "\globals_equiv st and invs and valid_arch_cap cap\ + arch_finalise_cap cap b + \\_. globals_equiv st\" + apply (induct cap; simp add: arch_finalise_cap_def) + apply (wp delete_asid_pool_globals_equiv case_option_wp unmap_page_globals_equiv + unmap_page_table_globals_equiv delete_asid_globals_equiv + | wpc | clarsimp simp: valid_arch_cap_def wellformed_mapdata_def)+ + apply fastforce + apply (wp delete_asid_pool_globals_equiv case_option_wp unmap_page_globals_equiv + unmap_page_table_globals_equiv delete_asid_globals_equiv + | wpc | clarsimp simp: valid_arch_cap_def wellformed_mapdata_def)+ + sorry + +declare arch_get_sanitise_register_info_def[simp] + +lemma prepare_thread_delete_globals_equiv[Finalise_IF_assms, wp]: + "prepare_thread_delete t \globals_equiv s\" + unfolding prepare_thread_delete_def + apply wpsimp + sorry + +lemma set_bound_notification_globals_equiv[Finalise_IF_assms]: + "\globals_equiv s and valid_arch_state\ + set_bound_notification ref ts + \\_. globals_equiv s\" + unfolding set_bound_notification_def + apply (wp set_object_globals_equiv dxo_wp_weak |simp)+ + apply (intro impI conjI allI) + by (fastforce simp: valid_arch_state_def obj_at_def tcb_at_def2 get_tcb_def is_tcb_def + dest: get_tcb_SomeD valid_global_arch_objs_pt_at + split: option.splits kernel_object.splits)+ + +end + + +global_interpretation Finalise_IF_1?: Finalise_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Finalise_IF_assms)?) 
+qed + +end diff --git a/proof/infoflow/AARCH64/ArchIRQMasks_IF.thy b/proof/infoflow/AARCH64/ArchIRQMasks_IF.thy new file mode 100644 index 0000000000..0ac9f24249 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchIRQMasks_IF.thy @@ -0,0 +1,225 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchIRQMasks_IF +imports IRQMasks_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems IRQMasks_IF_assms + +declare storeWord_irq_masks_inv[IRQMasks_IF_assms] + +lemma resetTimer_irq_masks[IRQMasks_IF_assms, wp]: + "resetTimer \\s. P (irq_masks s)\" + by (simp add: resetTimer_def | wp no_irq)+ + +lemma delete_objects_irq_masks[IRQMasks_IF_assms, wp]: + "delete_objects ptr bits \\s. P (irq_masks_of_state s)\" + apply (simp add: delete_objects_def) + apply (wp dmo_wp no_irq_mapM_x no_irq | simp add: freeMemory_def no_irq_storeWord)+ + done + +crunch invoke_untyped + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (ignore: delete_objects wp: crunch_wps dmo_wp + wp: mapME_x_inv_wp preemption_point_inv + simp: crunch_simps no_irq_clearMemory mapM_x_def_bak unless_def) + +lemma vcpu_invalidate_active_irq_masks[wp]: + "vcpu_invalidate_active \\s. P (irq_masks_of_state s)\" + unfolding vcpu_invalidate_active_def vcpu_disable_def + by (wpsimp wp: dmo_wp) + +crunch finalise_cap + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + ( wp: crunch_wps dmo_wp no_irq + simp: crunch_simps no_irq_setVSpaceRoot) + +crunch send_signal, timer_tick + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: crunch_wps ignore: do_machine_op wp: dmo_wp simp: crunch_simps) + +lemma handle_interrupt_irq_masks[IRQMasks_IF_assms]: + notes no_irq[wp del] + shows + "\(\s. P (irq_masks_of_state s)) and domain_sep_inv False st and K (irq \ maxIRQ)\ + handle_interrupt irq + \\rv s. 
P (irq_masks_of_state s)\" + apply (rule hoare_gen_asm) + apply (simp add: handle_interrupt_def split del: if_split) + apply (rule hoare_pre) + apply (rule hoare_if) + apply simp + apply (wp dmo_wp + | simp add: ackInterrupt_def maskInterrupt_def when_def split del: if_split + | wpc + | simp add: get_irq_state_def + | wp (once) hoare_drop_imp hoare_pre_cont)+ + apply (clarsimp simp: domain_sep_inv_def) + apply metis + done + +lemma arch_invoke_irq_control_irq_masks[IRQMasks_IF_assms]: + "\domain_sep_inv False st and arch_irq_control_inv_valid invok\ + arch_invoke_irq_control invok + \\_ s. P (irq_masks_of_state s)\" + apply (case_tac invok) + apply (auto simp: arch_irq_control_inv_valid_def domain_sep_inv_def valid_def) + done + +crunch handle_vm_fault + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: dmo_wp no_irq) + +lemma store_word_offs_irq_masks[wp]: + "store_word_offs ptr offs v \\s. P (irq_masks_of_state s)\" + unfolding store_word_offs_def + by (wpsimp wp: no_irq_storeWord dmo_wp) + +crunch set_extra_badge + for irq_masks[wp]: "\s. P (irq_masks_of_state s)" + + +lemma dmo_getActiveIRQ_irq_masks[IRQMasks_IF_assms, wp]: + "do_machine_op (getActiveIRQ in_kernel) \\s. P (irq_masks_of_state s)\" + apply (rule hoare_pre, rule dmo_wp) + apply (simp add: getActiveIRQ_def | wp | simp add: no_irq_def | clarsimp)+ + done + +lemma dmo_getActiveIRQ_return_axiom[IRQMasks_IF_assms, wp]: + "\\\ do_machine_op (getActiveIRQ in_kernel) \\rv s. (\x. rv = Some x \ x \ maxIRQ)\" + apply (simp add: getActiveIRQ_def) + apply (rule hoare_pre, rule dmo_wp) + apply (insert irq_oracle_max_irq) + apply (wp dmo_getActiveIRQ_irq_masks) + apply clarsimp + done + +crunch activate_thread, handle_spurious_irq + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + + +end + + +global_interpretation IRQMasks_IF_1?: IRQMasks_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact IRQMasks_IF_assms)?) 
+qed + + +context Arch begin global_naming AARCH64 + +crunch handle_hypervisor_fault + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: dmo_wp) + +(* FIXME AARCH64 IF: vcpu_disable modifies irq masks *) +lemma schedule_irq_masks[IRQMasks_IF_assms, wp]: + "schedule \\s. P (irq_masks_of_state s)\" + sorry + +crunch do_reply_transfer, set_priority, set_flags + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: crunch_wps empty_slot_irq_masks simp: crunch_simps unless_def) + +crunch arch_thread_set + for irq_masks[wp]: "\s. P (irq_masks_of_state s)" + +crunch arch_post_set_flags + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: crunch_wps dmo_wp empty_slot_irq_masks simp: crunch_simps unless_def) + +lemma no_irq_do_flush: + "no_irq (do_flush type vstart vend pstart)" + by (wpsimp simp: do_flush_def) + +crunch perform_vspace_invocation, perform_page_table_invocation, perform_asid_control_invocation, + perform_asid_pool_invocation, perform_sgi_invocation, perform_page_invocation + for irq_masks[IRQMasks_IF_assms, wp]: "\s. P (irq_masks_of_state s)" + (wp: dmo_wp crunch_wps no_irq simp: no_irq_do_flush crunch_simps) + +(* FIXME AARCH64 IF: vcpu_disable modifies irq masks *) +lemma perform_vcpu_invocation_irq_masks[wp]: + "perform_vcpu_invocation i \\s. P (irq_masks_of_state s)\" + sorry + +lemma arch_perform_invocation_irq_masks[IRQMasks_IF_assms, wp]: + "arch_perform_invocation i \\s. P (irq_masks_of_state s)\" + unfolding arch_perform_invocation_def fun_app_def + by wpsimp + +(* FIXME: remove duplication in this proof -- requires getting the wp automation + to do the right thing with dropping imps in validE goals *) +lemma invoke_tcb_irq_masks[IRQMasks_IF_assms]: + "\(\s. P (irq_masks_of_state s)) and domain_sep_inv False st and tcb_inv_wf tinv\ + invoke_tcb tinv + \\_ s. 
P (irq_masks_of_state s)\" + apply (case_tac tinv) + apply((wp restart_irq_masks hoare_vcg_if_lift mapM_x_wp[OF _ subset_refl] + | wpc + | simp split del: if_split add: check_cap_at_def + | clarsimp)+)[3] + defer + apply ((wp | simp )+)[2] + (* NotificationControl *) + apply (rename_tac option) + apply (case_tac option) + apply ((wp | simp)+)[2] + (* just ThreadControl left *) + apply (simp add: split_def cong: option.case_cong) + apply wpsimp+ + apply (rule hoare_strengthen_postE[OF cap_delete_irq_masks[where P=P]]) + apply blast + apply blast + apply (wpsimp wp: hoare_vcg_all_liftE_R hoare_vcg_const_imp_liftE_R hoare_vcg_all_lift hoare_drop_imps + checked_cap_insert_domain_sep_inv)+ + apply (rule_tac Q'="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" + and E'="\_ s. P (irq_masks_of_state s)" in hoare_strengthen_postE) + apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) + apply fastforce + apply blast + apply (wpsimp wp: hoare_weak_lift_imp hoare_vcg_all_lift checked_cap_insert_domain_sep_inv)+ + apply (rule_tac Q'="\ r s. domain_sep_inv False st s \ P (irq_masks_of_state s)" + and E'="\_ s. P (irq_masks_of_state s)" in hoare_strengthen_postE) + apply (wp hoare_vcg_conj_liftE1 cap_delete_irq_masks) + apply fastforce + apply blast + apply (simp add: option_update_thread_def | wp hoare_weak_lift_imp hoare_vcg_all_lift | wpc)+ + by fastforce+ + +declare init_arch_objects_irq_masks[IRQMasks_IF_assms] + +crunch arch_prepare_set_domain + for irq_masks[IRQMasks_IF_assms,wp]: "\s. P (irq_masks_of_state s)" + (wp: dmo_wp mapM_x_wp_inv mapM_wp_inv) + +end + + +global_interpretation IRQMasks_IF_2?: IRQMasks_IF_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact IRQMasks_IF_assms)?) 
+qed + + +requalify_facts + AARCH64.init_arch_objects_irq_masks + AARCH64.arch_activate_idle_thread_irq_masks + AARCH64.retype_region_irq_masks + +declare + init_arch_objects_irq_masks[wp] + arch_activate_idle_thread_irq_masks[wp] + retype_region_irq_masks[wp] + +end diff --git a/proof/infoflow/AARCH64/ArchInfoFlow.thy b/proof/infoflow/AARCH64/ArchInfoFlow.thy new file mode 100644 index 0000000000..20ed8281a3 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchInfoFlow.thy @@ -0,0 +1,73 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchInfoFlow +imports + "Access.ArchSyscall_AC" + "Lib.EquivValid" +begin + +context Arch begin global_naming AARCH64 + +section \Arch-specific equivalence properties\ + +subsection \ASID equivalence\ + +definition equiv_asid :: "asid \ det_ext state \ det_ext state \ bool" where + "equiv_asid asid s s' \ + ((arm_asid_table (arch_state s) (asid_high_bits_of asid)) = + (arm_asid_table (arch_state s') (asid_high_bits_of asid))) \ + (\pool_ptr. arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some pool_ptr + \ asid_pool_at pool_ptr s = asid_pool_at pool_ptr s' \ + (\asid_pool asid_pool'. asid_pools_of s pool_ptr = Some asid_pool \ + asid_pools_of s' pool_ptr = Some asid_pool' + \ asid_pool (asid_low_bits_of asid) = + asid_pool' (asid_low_bits_of asid)))" + +definition equiv_asid' where + "equiv_asid' asid pool_ptr_opt pool_ptr_opt' kh kh' \ + (case pool_ptr_opt of None \ pool_ptr_opt' = None + | Some pool_ptr \ + (case pool_ptr_opt' of None \ False + | Some pool_ptr' \ + (pool_ptr' = pool_ptr \ + ((\asid_pool. kh pool_ptr = Some (ArchObj (ASIDPool asid_pool))) = + (\asid_pool'. kh' pool_ptr' = Some (ArchObj (ASIDPool asid_pool')))) \ + (\asid_pool asid_pool'. 
kh pool_ptr = Some (ArchObj (ASIDPool asid_pool)) \ + kh' pool_ptr' = Some (ArchObj (ASIDPool asid_pool')) + \ asid_pool (asid_low_bits_of asid) = + asid_pool' (asid_low_bits_of asid)))))" + +definition non_asid_pool_kheap_update where + "non_asid_pool_kheap_update s kh \ + \x. (\asid_pool. kheap s x = Some (ArchObj (ASIDPool asid_pool)) \ + kh x = Some (ArchObj (ASIDPool asid_pool))) + \ kheap s x = kh x" + + +subsection \Exclusive machine state equivalence\ + +subsection \Global (Kernel) VSpace equivalence\ +(* globals_equiv should be maintained by everything except the scheduler, since + nothing else touches the globals frame *) + +definition arch_globals_equiv :: "obj_ref \ obj_ref \ kheap \ kheap \ arch_state \ + arch_state \ machine_state \ machine_state \ bool" where + "arch_globals_equiv ct it kh kh' as as' ms ms' \ + arm_us_global_vspace as = arm_us_global_vspace as' \ + kh (arm_us_global_vspace as) = kh' (arm_us_global_vspace as)" + +declare arch_globals_equiv_def[simp] + +end + +requalify_consts + AARCH64.equiv_asid + AARCH64.equiv_asid' + AARCH64.arch_globals_equiv + AARCH64.non_asid_pool_kheap_update + +end diff --git a/proof/infoflow/AARCH64/ArchInfoFlow_IF.thy b/proof/infoflow/AARCH64/ArchInfoFlow_IF.thy new file mode 100644 index 0000000000..84c2780a23 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchInfoFlow_IF.thy @@ -0,0 +1,122 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchInfoFlow_IF +imports InfoFlow_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems InfoFlow_IF_assms + +lemma asid_pool_at_kheap: + "asid_pool_at ptr s = (\asid_pool. 
kheap s ptr = Some (ArchObj (ASIDPool asid_pool)))" + by (simp add: atyp_at_eq_kheap_obj) + +lemma equiv_asid: + "equiv_asid asid s s' = equiv_asid' asid (arm_asid_table (arch_state s) (asid_high_bits_of asid)) + (arm_asid_table (arch_state s') (asid_high_bits_of asid)) + (kheap s) (kheap s')" + by (auto simp: equiv_asid_def equiv_asid'_def asid_pool_at_kheap opt_map_def split: option.splits) + +lemma equiv_asids_refl[InfoFlow_IF_assms]: + "equiv_asids R s s" + by (auto simp: equiv_asids_def equiv_asid_def) + +lemma equiv_asids_sym[InfoFlow_IF_assms]: + "equiv_asids R s t \ equiv_asids R t s" + by (auto simp: equiv_asids_def equiv_asid_def) + +lemma equiv_asids_trans[InfoFlow_IF_assms]: + "\ equiv_asids R s t; equiv_asids R t u \ \ equiv_asids R s u" + by (fastforce simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap asid_pools_of_ko_at obj_at_def) + +lemma equiv_asids_non_asid_pool_kheap_update[InfoFlow_IF_assms]: + "\ equiv_asids R s s'; non_asid_pool_kheap_update s kh; non_asid_pool_kheap_update s' kh' \ + \ equiv_asids R (s\kheap := kh\) (s'\kheap := kh'\)" + apply (clarsimp simp: equiv_asids_def equiv_asid non_asid_pool_kheap_update_def) + apply (fastforce simp: equiv_asid'_def split: option.splits) + done + +lemma equiv_asids_identical_kheap_updates[InfoFlow_IF_assms]: + "\ equiv_asids R s s'; identical_kheap_updates s s' kh kh' \ + \ equiv_asids R (s\kheap := kh\) (s'\kheap := kh'\)" + apply (clarsimp simp: equiv_asids_def equiv_asid_def opt_map_def + asid_pool_at_kheap identical_kheap_updates_def) + apply (case_tac "kh pool_ptr = kh' pool_ptr"; fastforce) + done + +lemma equiv_asids_trivial[InfoFlow_IF_assms]: + "(\x. 
P x \ False) \ equiv_asids P x y" + by (auto simp: equiv_asids_def) + +lemma equiv_asids_triv': + "\ equiv_asids R s s'; kheap t = kheap s; kheap t' = kheap s'; + arm_asid_table (arch_state t) = arm_asid_table (arch_state s); + arm_asid_table (arch_state t') = arm_asid_table (arch_state s') \ + \ equiv_asids R t t'" + by (fastforce simp: equiv_asids_def equiv_asid equiv_asid'_def) + +lemma equiv_asids_triv[InfoFlow_IF_assms]: + "\ equiv_asids R s s'; kheap t = kheap s; kheap t' = kheap s'; + arch_state t = arch_state s; arch_state t' = arch_state s' \ + \ equiv_asids R t t'" + by (fastforce simp: equiv_asids_triv') + +lemma globals_equiv_refl[InfoFlow_IF_assms]: + "globals_equiv s s" + by (simp add: globals_equiv_def idle_equiv_refl) + +lemma globals_equiv_sym[InfoFlow_IF_assms]: + "globals_equiv s t \ globals_equiv t s" + by (auto simp: globals_equiv_def idle_equiv_def) + +lemma globals_equiv_trans[InfoFlow_IF_assms]: + "\ globals_equiv s t; globals_equiv t u \ \ globals_equiv s u" + unfolding globals_equiv_def arch_globals_equiv_def + by clarsimp (metis idle_equiv_trans idle_equiv_def) + +lemma equiv_asids_guard_imp[InfoFlow_IF_assms]: + "\ equiv_asids R s s'; \x. Q x \ R x \ \ equiv_asids Q s s'" + by (auto simp: equiv_asids_def) + +lemma dmo_loadWord_rev[InfoFlow_IF_assms]: + "reads_equiv_valid_inv A aag (K (for_each_byte_of_word (aag_can_read aag) p)) + (do_machine_op (loadWord p))" + apply (rule gen_asm_ev) + apply (rule use_spec_ev) + apply (rule spec_equiv_valid_hoist_guard) + apply (rule do_machine_op_spec_rev) + apply (simp add: loadWord_def equiv_valid_def2 spec_equiv_valid_def) + apply (rule_tac R'="\rv rv'. for_each_byte_of_word (\y. rv y = rv' y) p" and Q="\\" and Q'="\\" + and P="\" and P'="\" in equiv_valid_2_bind_pre) + apply (rule_tac R'="(=)" and Q="\r s. p && mask 3 = 0" and Q'="\r s. 
p && mask 3 = 0" + and P="\" and P'="\" in equiv_valid_2_bind_pre) + apply (rule return_ev2) + apply (rule_tac f="word_rcat" in arg_cong) + apply (clarsimp simp: upto.simps) + apply (fastforce intro: is_aligned_no_wrap' word_plus_mono_right + simp: is_aligned_mask for_each_byte_of_word_def word_size_def) + apply (rule assert_ev2[OF refl]) + apply (rule assert_wp)+ + apply simp+ + apply (clarsimp simp: equiv_valid_2_def in_monad for_each_byte_of_word_def) + apply (erule equiv_forD) + apply fastforce + apply (wp wp_post_taut loadWord_inv | simp)+ + done + +end + + +global_interpretation InfoFlow_IF_1?: InfoFlow_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact InfoFlow_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchInterrupt_IF.thy b/proof/infoflow/AARCH64/ArchInterrupt_IF.thy new file mode 100644 index 0000000000..45d9a0284c --- /dev/null +++ b/proof/infoflow/AARCH64/ArchInterrupt_IF.thy @@ -0,0 +1,63 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchInterrupt_IF +imports Interrupt_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Interrupt_IF_assms + +lemma arch_invoke_irq_handler_reads_respects[Interrupt_IF_assms, wp]: + "reads_respects_f aag l (silc_inv aag st) (arch_invoke_irq_handler irq)" + apply (cases irq) + apply (wpsimp simp: plic_complete_claim_def deactivateInterrupt_def maskInterrupt_def) + apply (rule reads_respects_f[where P=\ and Q=\, simplified]) + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects) + apply (simp add: equiv_valid_def2) + apply (rule modify_ev2) + apply (fastforce simp: equiv_for_def) + apply (wp modify_wp | simp)+ + done + +lemma arch_invoke_irq_control_reads_respects[Interrupt_IF_assms]: + "reads_respects aag (l :: 'a subject_label) (K (arch_authorised_irq_ctl_inv aag i)) + (arch_invoke_irq_control i)" + apply (cases i) + apply (simp add: setIRQTrigger_def) 
+ apply (wp cap_insert_reads_respects set_irq_state_reads_respects dmo_mol_reads_respects | simp)+ + apply (clarsimp simp: arch_authorised_irq_ctl_inv_def) + apply (wpsimp wp: equiv_valid_guard_imp[OF cap_insert_reads_respects]) + apply (clarsimp simp: arch_authorised_irq_ctl_inv_def) + done + +lemma arch_invoke_irq_control_globals_equiv[Interrupt_IF_assms]: + "\globals_equiv st and valid_arch_state and valid_global_objs\ + arch_invoke_irq_control ai + \\_. globals_equiv st\" + apply (induct ai) + apply (simp add: setIRQTrigger_def) + apply (wpsimp wp: set_irq_state_globals_equiv set_irq_state_valid_global_objs + cap_insert_globals_equiv'' dmo_mol_globals_equiv)+ + done + +lemma arch_invoke_irq_handler_globals_equiv[Interrupt_IF_assms, wp]: + "arch_invoke_irq_handler irq \globals_equiv st\" + by (cases irq; wpsimp wp: dmo_no_mem_globals_equiv simp: plic_complete_claim_def deactivateInterrupt_def) + +end + + +global_interpretation Interrupt_IF_1?: Interrupt_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Interrupt_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchIpc_IF.thy b/proof/infoflow/AARCH64/ArchIpc_IF.thy new file mode 100644 index 0000000000..fb0f7194b5 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchIpc_IF.thy @@ -0,0 +1,479 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchIpc_IF +imports Ipc_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Ipc_IF_assms + +lemma lookup_ipc_buffer_reads_respects[Ipc_IF_assms]: + "reads_respects aag l (K (aag_can_read aag thread \ aag_can_affect aag l thread)) + (lookup_ipc_buffer is_receiver thread)" + unfolding lookup_ipc_buffer_def + by (wp thread_get_reads_respects get_cap_reads_respects | wpc | simp)+ + +lemma as_user_equiv_but_for_labels[Ipc_IF_assms]: + "\equiv_but_for_labels aag L st and K (pasObjectAbs aag thread \ L)\ + as_user thread f + \\_. 
equiv_but_for_labels aag L st\" + unfolding as_user_def + apply (wp set_object_equiv_but_for_labels | simp add: split_def)+ + apply (blast dest: get_tcb_not_asid_pool_at) + done + +lemma storeWord_equiv_but_for_labels[Ipc_IF_assms]: + "\\ms. equiv_but_for_labels aag L st (s\machine_state := ms\) \ + for_each_byte_of_word (\x. pasObjectAbs aag x \ L) p\ + storeWord p v + \\_ ms. equiv_but_for_labels aag L st (s\machine_state := ms\)\" + unfolding storeWord_def + apply (wp modify_wp) + apply (clarsimp simp: equiv_but_for_labels_def) + apply (rule states_equiv_forI) + apply (fastforce intro!: equiv_forI elim!: states_equiv_forE dest: equiv_forD) + apply (simp add: states_equiv_for_def) + apply (rule conjI) + apply (rule equiv_forI) + apply clarsimp + apply (drule_tac f=underlying_memory in equiv_forD,fastforce) + apply (fastforce intro: is_aligned_no_wrap' word_plus_mono_right + simp: is_aligned_mask for_each_byte_of_word_def word_size_def upto.simps) + apply (rule equiv_forI) + apply clarsimp + apply (drule_tac f=device_state in equiv_forD,fastforce) + apply clarsimp + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=cdt]) + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=cdt_list]) + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=is_original_cap]) + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=interrupt_states]) + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=interrupt_irq_node]) + apply (fastforce simp: equiv_asids_def equiv_asid_def elim: states_equiv_forE) + apply (fastforce elim: states_equiv_forE intro: equiv_forI dest: equiv_forD[where f=ready_queues]) + done + +lemma set_thread_state_runnable_equiv_but_for_labels[Ipc_IF_assms]: + "runnable tst + \ \equiv_but_for_labels aag L st and K (pasObjectAbs aag thread \ L)\ + set_thread_state thread tst + \\_. 
equiv_but_for_labels aag L st\" + unfolding set_thread_state_def + apply (wpsimp wp: set_object_equiv_but_for_labels[THEN hoare_set_object_weaken_pre] + set_thread_state_act_runnable_equiv_but_for_labels) + apply (wpsimp wp: set_object_wp)+ + apply (fastforce dest: get_tcb_not_asid_pool_at simp: st_tcb_at_def obj_at_def) + done + +lemma set_endpoint_equiv_but_for_labels[Ipc_IF_assms]: + "\equiv_but_for_labels aag L st and K (pasObjectAbs aag epptr \ L)\ + set_endpoint epptr ep + \\_. equiv_but_for_labels aag L st\" + unfolding set_simple_ko_def + apply (wp set_object_equiv_but_for_labels get_object_wp) + apply (clarsimp simp: asid_pool_at_kheap partial_inv_def obj_at_def split: kernel_object.splits) + done + +(* FIXME move *) +lemma conj_imp: + "\ Q \ R; P \ Q; P' \ Q \ \ (P \ R) \ (P' \ R)" + by fastforce + +(* basically clagged directly from lookup_ipc_buffer_has_auth *) +lemma lookup_ipc_buffer_has_read_auth[Ipc_IF_assms]: + "\pas_refined aag and valid_objs\ + lookup_ipc_buffer is_receiver thread + \\rv s. 
ipc_buffer_has_read_auth aag (pasObjectAbs aag thread) rv\" + apply (rule hoare_pre) + apply (simp add: lookup_ipc_buffer_def) + apply (wp get_cap_wp thread_get_wp' | wpc)+ + apply (clarsimp simp: cte_wp_at_caps_of_state ipc_buffer_has_read_auth_def get_tcb_ko_at[symmetric]) + apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"]) + apply (simp add: dom_tcb_cap_cases) + apply (frule (1) caps_of_state_valid_cap) + apply (clarsimp simp: vm_read_only_def vm_read_write_def) + apply (rule_tac Q="AllowRead \ xb" in conj_imp) + apply (clarsimp simp: valid_cap_simps cap_aligned_def) + apply (rule conjI) + apply (erule aligned_add_aligned) + apply (rule is_aligned_andI1) + apply (drule (1) valid_tcb_objs) + apply (clarsimp simp: valid_obj_def valid_tcb_def valid_ipc_buffer_cap_def + split: if_splits) + apply (rule order_trans [OF _ pbfs_atleast_pageBits]) + apply (simp add: msg_align_bits pageBits_def) + apply (drule (1) cap_auth_caps_of_state) + apply (clarsimp simp: aag_cap_auth_def cap_auth_conferred_def arch_cap_auth_conferred_def + vspace_cap_rights_to_auth_def vm_read_only_def) + apply (drule bspec) + apply (erule (3) ipcframe_subset_page) + apply (simp_all) + done + +lemma cptrs_in_ipc_buffer[Ipc_IF_assms]: + "\ n \ set [buffer_cptr_index ..< buffer_cptr_index + unat (mi_extra_caps mi)]; + is_aligned (p :: obj_ref) msg_align_bits; + buffer_cptr_index + unat (mi_extra_caps mi) < 2 ^ (msg_align_bits - word_size_bits) \ + \ ptr_range (p + of_nat n * of_nat word_size) word_size_bits \ ptr_range p msg_align_bits" + apply (rule ptr_range_subset) + apply assumption + apply (simp add: msg_align_bits') + apply (simp add: msg_align_bits' word_size_bits_def word_bits_def) + apply (simp add: word_size_def) + apply (subst upto_enum_step_shift_red[where us=3, simplified]) + apply (simp add: msg_align_bits' word_bits_def word_size_bits_def)+ + done + +lemma msg_in_ipc_buffer[Ipc_IF_assms]: + "\ n = msg_max_length \ n < msg_max_length; is_aligned p 
msg_align_bits; + unat (mi_length mi) < 2 ^ (msg_align_bits - word_size_bits) \ + \ ptr_range (p + of_nat n * of_nat word_size) word_size_bits + \ ptr_range (p :: obj_ref) msg_align_bits" + apply (rule ptr_range_subset) + apply assumption + apply (simp add: msg_align_bits') + apply (simp add: msg_align_bits word_bits_def) + apply (simp add: word_size_def) + apply (subst upto_enum_step_shift_red[where us=3, simplified]) + apply (simp add: msg_align_bits word_bits_def)+ + apply (simp add: image_def) + apply (rule_tac x=n in bexI) + apply (rule refl) + apply (auto simp: msg_max_length_def) + done + +lemma arch_derive_cap_reads_respects[Ipc_IF_assms]: + "reads_respects aag l \ (arch_derive_cap cap)" + unfolding arch_derive_cap_def fun_app_def + apply (rule equiv_valid_guard_imp) + apply (wp | wpc)+ + apply (simp) + done + +lemma arch_derive_cap_rev[Ipc_IF_assms]: + "reads_equiv_valid_inv aag l \ (arch_derive_cap cap)" + unfolding arch_derive_cap_def fun_app_def + apply (rule equiv_valid_guard_imp) + apply (wp | wpc)+ + apply (simp) + done + +lemma captransfer_in_ipc_buffer[Ipc_IF_assms]: + "\ is_aligned (buf :: obj_ref) msg_align_bits; n \ {0..2} \ + \ ptr_range (buf + (2 + (of_nat msg_max_length + of_nat msg_max_extra_caps)) * word_size + + n * word_size) + word_size_bits + \ ptr_range buf msg_align_bits" + apply (rule ptr_range_subset) + apply assumption + apply (simp add: msg_align_bits') + apply (simp add: msg_align_bits word_bits_def) + apply (simp add: word_size_def) + apply (subst upto_enum_step_shift_red[where us=3, simplified]) + apply (simp add: msg_align_bits word_bits_def)+ + apply (simp add: image_def msg_max_length_def msg_max_extra_caps_def) + apply (rule_tac x="(125::nat) + unat n" in bexI) + apply simp+ + apply (fastforce intro: unat_less_helper word_leq_minus_one_le) + done + +lemma mrs_in_ipc_buffer[Ipc_IF_assms]: + "\ n \ set [length msg_registers + 1 ..< Suc n']; + is_aligned (buf :: obj_ref) msg_align_bits; n' < 2 ^ (msg_align_bits - 
word_size_bits) \ + \ ptr_range (buf + of_nat n * of_nat word_size) word_size_bits \ ptr_range buf msg_align_bits" + apply (rule ptr_range_subset) + apply assumption + apply (simp add: msg_align_bits') + apply (simp add: msg_align_bits word_bits_def) + apply (simp add: word_size_def) + apply (subst upto_enum_step_shift_red[where us=3, simplified]) + apply (simp add: msg_align_bits word_bits_def word_size_bits_def)+ + apply (simp add: image_def) + apply (rule_tac x=n in bexI) + apply (rule refl) + apply (fastforce split: if_split_asm) + done + +lemma dmo_loadWord_reads_respects[Ipc_IF_assms]: + "reads_respects aag l (K (for_each_byte_of_word (\ x. aag_can_read_or_affect aag l x) p)) + (do_machine_op (loadWord p))" + apply (rule gen_asm_ev) + apply (rule use_spec_ev) + apply (rule spec_equiv_valid_hoist_guard) + apply (rule do_machine_op_spec_reads_respects) + apply (simp add: loadWord_def equiv_valid_def2 spec_equiv_valid_def) + apply (rule_tac R'="\rv rv'. for_each_byte_of_word (\y. rv y = rv' y) p" + and Q="\\" and Q'="\\" and P="\" and P'="\" in equiv_valid_2_bind_pre) + apply (rule_tac R'="(=)" and Q="\ r s. p && mask 3 = 0" and Q'="\ r s. 
p && mask 3 = 0" + and P="\" and P'="\" in equiv_valid_2_bind_pre) + apply (rule return_ev2) + apply (rule_tac f="word_rcat" in arg_cong) + apply (fastforce simp: upto.simps is_aligned_mask for_each_byte_of_word_def word_size_def + intro: is_aligned_no_wrap' word_plus_mono_right) + apply (rule assert_ev2[OF refl]) + apply (rule assert_wp)+ + apply simp+ + apply (clarsimp simp: equiv_valid_2_def in_monad for_each_byte_of_word_def) + apply (fastforce elim: equiv_forD orthD1 simp: ptr_range_def add.commute) + apply (wp wp_post_taut loadWord_inv | simp)+ + done + +lemma complete_signal_reads_respects[Ipc_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l (K (aag_can_read aag ntfnptr \ aag_can_affect aag l ntfnptr)) + (complete_signal ntfnptr receiver)" + unfolding complete_signal_def + by (wp set_simple_ko_reads_respects get_simple_ko_reads_respects as_user_set_register_reads_respects' + | wpc | simp)+ + +lemma handle_arch_fault_reply_reads_respects[Ipc_IF_assms, wp]: + "reads_respects aag l (K (aag_can_read aag thread)) (handle_arch_fault_reply afault thread x y)" + by (simp add: handle_arch_fault_reply_def, wp) + +lemma arch_thread_get_reads_respects[wp]: + "reads_respects aag l (K (aag_can_read_or_affect aag l t)) (arch_thread_get f t)" + unfolding arch_thread_get_def + apply (wpsimp) + apply (fastforce elim: reads_equivE affects_equivE equiv_forE simp: get_tcb_def split: option.splits) + done + +lemma arch_get_sanitise_register_info_reads_respects[Ipc_IF_assms, wp]: + "reads_respects aag l (K (aag_can_read_or_affect aag l t)) (arch_get_sanitise_register_info t)" + by wpsimp + +declare arch_get_sanitise_register_info_inv[Ipc_IF_assms] + +lemma lookup_ipc_buffer_ptr_range'[Ipc_IF_assms]: + "\valid_objs\ + lookup_ipc_buffer True thread + \\rv s. 
rv = Some buf' \ auth_ipc_buffers s thread = ptr_range buf' msg_align_bits\" + unfolding lookup_ipc_buffer_def + apply (rule hoare_pre) + apply (wp get_cap_wp thread_get_wp' | wpc)+ + apply (clarsimp simp: cte_wp_at_caps_of_state ipc_buffer_has_auth_def get_tcb_ko_at [symmetric]) + apply (frule caps_of_state_tcb_cap_cases [where idx = "tcb_cnode_index 4"]) + apply (simp add: dom_tcb_cap_cases) + apply (clarsimp simp: auth_ipc_buffers_def get_tcb_ko_at [symmetric]) + apply (drule(1) valid_tcb_objs) + apply (drule get_tcb_SomeD)+ + apply (simp add: vm_read_write_def valid_tcb_def valid_ipc_buffer_cap_def split: bool.splits) + done + +lemma lookup_ipc_buffer_aligned'[Ipc_IF_assms]: + "\valid_objs\ + lookup_ipc_buffer True thread + \\rv s. rv = Some buf' \ is_aligned buf' msg_align_bits\" + apply (insert lookup_ipc_buffer_aligned) + apply (fastforce simp: valid_def) + done + +lemma handle_arch_fault_reply_globals_equiv[Ipc_IF_assms]: + "\globals_equiv st and valid_arch_state and (\s. thread \ idle_thread s)\ + handle_arch_fault_reply vmf thread x y + \\_. globals_equiv st\" + by (wpsimp simp: handle_arch_fault_reply_def)+ + +crunch arch_get_sanitise_register_info, handle_arch_fault_reply + for valid_global_objs[Ipc_IF_assms, wp]: "valid_global_objs" + +crunch handle_arch_fault_reply + for valid_arch_state[Ipc_IF_assms,wp]: "\s :: det_state. valid_arch_state s" + +lemma transfer_caps_loop_valid_arch[Ipc_IF_assms]: + "transfer_caps_loop ep buffer n caps slots mi \valid_arch_state :: det_ext state \ _\" + by (wp valid_arch_state_lift_aobj_at_no_caps transfer_caps_loop_aobj_at) + +end + + +global_interpretation Ipc_IF_1?: Ipc_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Ipc_IF_assms)?) 
+qed + + +context Arch begin global_naming AARCH64 + +lemma copy_mrs_reads_respects[Ipc_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows + "reads_respects aag (l :: 'a subject_label) + (K (aag_can_read_or_affect aag l sender \ aag_can_read_or_affect_ipc_buffer aag l sbuf + \ unat n < 2 ^ (msg_align_bits - word_size_bits))) + (copy_mrs sender sbuf receiver rbuf n)" + unfolding copy_mrs_def fun_app_def + apply (rule gen_asm_ev) + apply (wp mapM_ev'' store_word_offs_reads_respects load_word_offs_reads_respects + as_user_set_register_reads_respects' as_user_reads_respects + | wpc + | simp add: det_setRegister det_getRegister split del: if_split)+ + apply clarsimp + apply (rename_tac n') + apply (subgoal_tac "ptr_range (x + of_nat n' * of_nat word_size) word_size_bits + \ ptr_range x msg_align_bits") + apply (simp add: for_each_byte_of_word_def2) + apply (simp add: aag_can_read_or_affect_ipc_buffer_def) + apply (erule conjE) + apply (rule ballI) + apply (erule bspec) + apply (erule (1) subsetD[rotated]) + apply (rule ptr_range_subset) + apply (simp add: aag_can_read_or_affect_ipc_buffer_def) + apply (simp add: msg_align_bits') + apply (simp add: msg_align_bits word_bits_def) + apply (simp add: word_size_def word_size_bits_def) + apply (subst upto_enum_step_shift_red[where us=3, simplified]) + apply (simp add: msg_align_bits word_bits_def aag_can_read_or_affect_ipc_buffer_def )+ + apply (fastforce simp: image_def) + done + +lemma get_message_info_reads_respects[Ipc_IF_assms]: + "reads_respects aag (l :: 'a subject_label) (K (aag_can_read_or_affect aag l ptr)) (get_message_info ptr)" + apply (simp add: get_message_info_def) + apply (wp as_user_reads_respects | clarsimp simp: getRegister_def)+ + done + +lemma do_normal_transfer_reads_respects[Ipc_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows + "reads_respects aag (l :: 'a subject_label) + (pas_refined aag and valid_mdb and valid_objs + and K 
(aag_can_read_or_affect aag l sender \ + ipc_buffer_has_read_auth aag (pasObjectAbs aag sender) sbuf \ + ipc_buffer_has_read_auth aag (pasObjectAbs aag receiver) rbuf \ + (grant \ (is_subject aag sender \ is_subject aag receiver)))) + (do_normal_transfer sender sbuf endpoint badge grant receiver rbuf)" + apply (cases grant) + apply (rule gen_asm_ev) + apply (simp add: do_normal_transfer_def) + apply (wp copy_mrs_pas_refined get_message_info_rev lookup_extra_caps_rev + as_user_set_register_reads_respects' set_message_info_reads_respects + transfer_caps_reads_respects copy_mrs_reads_respects lookup_extra_caps_rev + lookup_extra_caps_authorised lookup_extra_caps_auth get_message_info_rev + get_mi_length' get_mi_length validE_E_wp_post_taut + copy_mrs_cte_wp_at hoare_vcg_ball_lift lec_valid_cap' + lookup_extra_caps_srcs[simplified ball_conj_distrib,THEN hoare_conjDR1] + lookup_extra_caps_srcs[simplified ball_conj_distrib,THEN hoare_conjDR2] + | wpc + | simp add: det_setRegister ball_conj_distrib)+ + apply (fastforce intro: aag_has_read_auth_can_read_or_affect_ipc_buffer) + apply (rule gen_asm_ev) + apply (simp add: do_normal_transfer_def transfer_caps_def) + apply (wp ev_irrelevant_bind[where f="get_receive_slots receiver rbuf"] + as_user_set_register_reads_respects' + set_message_info_reads_respects copy_mrs_reads_respects + get_message_info_reads_respects get_mi_length + | wpc + | simp)+ + apply (auto simp: ipc_buffer_has_read_auth_def aag_can_read_or_affect_ipc_buffer_def + dest: reads_read_thread_read_pages split: option.splits) + done + +lemma make_arch_fault_msg_reads_respects[Ipc_IF_assms]: + "reads_respects aag (l :: 'a subject_label) (\y. 
aag_can_read_or_affect aag l sender) + (make_arch_fault_msg x4 sender)" + apply (case_tac x4) + apply (wp as_user_reads_respects | simp add: det_getRegister det_getRestartPC)+ + done + +lemma set_mrs_equiv_but_for_labels[Ipc_IF_assms]: + "\equiv_but_for_labels (aag :: 'a subject_label PAS) L st and + K (pasObjectAbs aag thread \ L \ + (case buf of (Some buf') \ is_aligned buf' msg_align_bits \ + (\x \ ptr_range buf' msg_align_bits. pasObjectAbs aag x \ L) + | _ \ True))\ + set_mrs thread buf msgs + \\_. equiv_but_for_labels aag L st\" + unfolding set_mrs_def + apply (wp | wpc)+ + apply (subst zipWithM_x_mapM_x) + apply (rule_tac Q'="\_. equiv_but_for_labels aag L st and K (pasObjectAbs aag thread \ L \ + (case buf of (Some buf') \ is_aligned buf' msg_align_bits \ + (\x \ ptr_range buf' msg_align_bits. + pasObjectAbs aag x \ L) + | _ \ True))" in hoare_strengthen_post) + apply (wp mapM_x_wp' store_word_offs_equiv_but_for_labels | simp add: split_def)+ + apply (case_tac x, clarsimp split: if_split_asm elim!: in_set_zipE) + apply (clarsimp simp: for_each_byte_of_word_def) + apply (erule bspec) + apply (clarsimp simp: ptr_range_def) + apply (rule conjI) + apply (erule order_trans[rotated]) + apply (erule is_aligned_no_wrap') + apply (rule mul_word_size_lt_msg_align_bits_ofnat) + apply (fastforce simp: msg_max_length_def msg_align_bits') + apply (erule order_trans) + apply (subst p_assoc_help) + apply (simp add: add.assoc) + apply (rule word_plus_mono_right) + apply (rule word_less_sub_1) + apply (rule_tac y="of_nat msg_max_length * of_nat word_size + (word_size - 1)" + in le_less_trans) + apply (rule word_plus_mono_left) + apply (rule word_mult_le_mono1) + apply (erule disjE) + apply (rule word_of_nat_le) + apply (simp add: msg_max_length_def) + apply clarsimp + apply (rule word_of_nat_le) + apply (simp add: msg_max_length_def) + apply (simp add: word_size_def) + apply (simp add: msg_max_length_def word_size_def) + apply (simp add: msg_max_length_def word_size_def) + 
apply (rule mul_add_word_size_lt_msg_align_bits_ofnat) + apply (simp add: msg_max_length_def msg_align_bits') + apply (simp add: word_size_def) + apply (erule is_aligned_no_overflow') + apply simp + apply (wp set_object_equiv_but_for_labels hoare_vcg_all_lift hoare_weak_lift_imp | simp)+ + apply (fastforce dest: get_tcb_not_asid_pool_at)+ + done + +lemma set_mrs_reads_respects'[Ipc_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows + "reads_respects aag (l :: 'a subject_label) + (K (ipc_buffer_has_auth aag thread buf \ + (case buf of (Some buf') \ is_aligned buf' msg_align_bits | _ \ True))) + (set_mrs thread buf msgs)" + apply (case_tac "aag_can_read_or_affect aag l thread") + apply ((wp equiv_valid_guard_imp[OF set_mrs_reads_respects] | simp)+)[1] + apply (rule gen_asm_ev) + apply (simp add: equiv_valid_def2) + apply (rule equiv_valid_rv_guard_imp) + apply (case_tac buf) + apply (rule_tac Q="\" and P="\" and L="{pasObjectAbs aag thread}" in ev_invisible[OF domains_distinct]) + apply (clarsimp simp: labels_are_invisible_def) + apply (rule modifies_at_mostI) + apply (simp add: set_mrs_def) + apply ((wp set_object_equiv_but_for_labels | simp | auto dest: get_tcb_not_asid_pool_at)+)[1] + apply (simp) + apply (rule set_mrs_ret_eq) + apply (rename_tac buf') + apply (rule_tac Q="\" and L="{pasObjectAbs aag thread} \ (pasObjectAbs aag) + ` (ptr_range buf' msg_align_bits)" + in ev_invisible[OF domains_distinct]) + apply (auto simp: labels_are_invisible_def ipc_buffer_has_auth_def + dest: reads_read_page_read_thread simp: aag_can_affect_label_def)[1] + apply (rule modifies_at_mostI) + apply (wp set_mrs_equiv_but_for_labels | simp)+ + apply (rule set_mrs_ret_eq) + by simp + +end + + +global_interpretation Ipc_IF_2?: Ipc_IF_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Ipc_IF_assms)?) 
+qed + +end diff --git a/proof/infoflow/AARCH64/ArchNoninterference.thy b/proof/infoflow/AARCH64/ArchNoninterference.thy new file mode 100644 index 0000000000..c90455a4ac --- /dev/null +++ b/proof/infoflow/AARCH64/ArchNoninterference.thy @@ -0,0 +1,501 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchNoninterference +imports Noninterference +begin + +context Arch begin global_naming AARCH64 + +named_theorems Noninterference_assms + +(* clagged straight from ADT_AC.do_user_op_respects *) +lemma do_user_op_if_integrity[Noninterference_assms]: + "\invs and integrity aag X st and is_subject aag \ cur_thread and pas_refined aag\ + do_user_op_if uop tc + \\_. integrity aag X st\" + apply (simp add: do_user_op_if_def) + apply (wpsimp wp: dmo_user_memory_update_respects_Write dmo_device_update_respects_Write + hoare_vcg_all_lift hoare_vcg_imp_lift + wp_del: select_wp) + apply (rule hoare_pre_cont) + apply (wp | wpc | clarsimp)+ + apply (rule conjI) + apply clarsimp + apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) + apply (drule_tac auth=Write in user_op_access') + apply (simp add: vspace_cap_rights_to_auth_def)+ + apply clarsimp + apply (simp add: restrict_map_def ptable_lift_s_def ptable_rights_s_def split: if_splits) + apply (drule_tac auth=Write in user_op_access') + apply (simp add: vspace_cap_rights_to_auth_def)+ + done + +lemma do_user_op_if_globals_equiv_scheduler[Noninterference_assms]: + "\globals_equiv_scheduler st and invs\ + do_user_op_if tc uop + \\_. 
globals_equiv_scheduler st\" + apply (simp add: do_user_op_if_def) + apply (wpsimp wp: dmo_user_memory_update_globals_equiv_scheduler + dmo_device_memory_update_globals_equiv_scheduler)+ + apply (auto simp: ptable_lift_s_def ptable_rights_s_def) + done + +crunch do_user_op_if + for silc_dom_equiv[Noninterference_assms, wp]: "silc_dom_equiv aag st" + (ignore: do_machine_op user_memory_update wp: crunch_wps) + +lemma sameFor_scheduler_affects_equiv[Noninterference_assms]: + "\ (s,s') \ same_for aag PSched; (s,s') \ same_for aag (Partition l); + invs (internal_state_if s); invs (internal_state_if s') \ + \ scheduler_equiv aag (internal_state_if s) (internal_state_if s') \ + scheduler_affects_equiv aag (OrdinaryLabel l) (internal_state_if s) (internal_state_if s')" + apply (rule conjI) + apply (blast intro: sameFor_scheduler_equiv) + apply (clarsimp simp: scheduler_affects_equiv_def arch_scheduler_affects_equiv_def + sameFor_def silc_dom_equiv_def reads_scheduler_def sameFor_scheduler_def) + (* simplifying using sameFor_subject_def in assumptions causes simp to loop *) + apply (simp (no_asm_use) add: sameFor_subject_def disjoint_iff_not_equal Bex_def) + apply (blast intro: globals_equiv_to_scheduler_globals_frame_equiv globals_equiv_to_cur_thread_eq) + done + +lemma do_user_op_if_partitionIntegrity[Noninterference_assms]: + "\partitionIntegrity aag st and pas_refined aag and invs and is_subject aag \ cur_thread\ + do_user_op_if tc uop + \\_. partitionIntegrity aag st\" + apply (rule_tac Q'="\rv s. 
integrity (aag\pasMayActivate := False, pasMayEditReadyQueues := False\) + (scheduler_affects_globals_frame st) st s \ + domain_fields_equiv st s \ idle_thread s = idle_thread st \ + globals_equiv_scheduler st s \ silc_dom_equiv aag st s" + in hoare_strengthen_post) + apply (wp hoare_vcg_conj_lift do_user_op_if_integrity do_user_op_if_globals_equiv_scheduler + hoare_vcg_all_lift domain_fields_equiv_lift[where Q="\" and R="\"] | simp)+ + apply (clarsimp simp: partitionIntegrity_def)+ + done + +lemma arch_activate_idle_thread_reads_respects_g[Noninterference_assms, wp]: + "reads_respects_g aag l \ (arch_activate_idle_thread t)" + unfolding arch_activate_idle_thread_def by wpsimp + +crunch handle_spurious_irq + for domain[wp]: "\s. Q (domain_time s) (domain_index s) (domain_list s)" + and irq_state_of_state[wp]: "\s. P (irq_state_of_state s)" + +lemma handle_spurious_irq_reads_respect_scheduler[Noninterference_assms]: + "reads_respects_scheduler aag l \ handle_spurious_irq" + unfolding handle_spurious_irq_def + by wpsimp + +definition arch_globals_equiv_strengthener :: "machine_state \ machine_state \ bool" where + "arch_globals_equiv_strengthener ms ms' \ True" + +declare arch_globals_equiv_strengthener_def[simp] + +lemma arch_globals_equiv_strengthener_thread_independent[Noninterference_assms]: + "arch_globals_equiv_strengthener (machine_state s) (machine_state s') + \ \ct ct' it it'. 
arch_globals_equiv ct it (kheap s) (kheap s') + (arch_state s) (arch_state s') (machine_state s) (machine_state s') = + arch_globals_equiv ct' it' (kheap s) (kheap s') + (arch_state s) (arch_state s') (machine_state s) (machine_state s')" + by auto + +lemma integrity_asids_update_reference_state[Noninterference_assms]: + "is_subject aag t + \ integrity_asids aag {pasSubject aag} x a s (s\kheap := (kheap s)(t \ blah)\)" + by (clarsimp simp: integrity_asids_def opt_map_def) + +lemma inte_obj_arch: + assumes inte_obj: "(integrity_obj_atomic aag activate subjects l)\<^sup>*\<^sup>* ko ko'" + assumes "ko = Some (ArchObj ao)" + assumes "ko \ ko'" + shows "integrity_obj_atomic aag activate subjects l ko ko'" +proof (cases "l \ subjects") + case True + then show ?thesis by (fastforce intro: integrity_obj_atomic.intros) +next + case False + note l = this + have "\ao'. ko = Some (ArchObj ao) \ + ko \ ko' \ + integrity_obj_atomic aag activate subjects l ko ko'" + using inte_obj + proof (induct rule: rtranclp_induct) + case base + then show ?case by clarsimp + next + case (step y z) + have "\ao'. 
ko' = Some (ArchObj ao')" + using False inte_obj assms + by (auto elim!: rtranclp_induct integrity_obj_atomic.cases) + then show ?case using step.hyps + by (fastforce intro: arch_integrity_obj_atomic.intros integrity_obj_atomic.intros + elim!: integrity_obj_atomic.cases arch_integrity_obj_atomic.cases) + qed + then show ?thesis + using assms by fastforce +qed + +lemma asid_pool_into_aag: + "\ pool_for_asid asid s = Some p; kheap s p = Some (ArchObj (ASIDPool pool)); + pool r = Some entry; ap_vspace entry = p'; pas_refined aag s \ + \ abs_has_auth_to aag Control p p'" + apply (rule pas_refined_mem [rotated], assumption) + apply (rule sta_vref) + apply (rule state_vrefsD) + apply (erule pool_for_asid_vs_lookupD) + apply (fastforce simp: opt_map_def) + apply fastforce + apply (fastforce simp: vs_refs_aux_def graph_of_def image_iff) + done + +lemma owns_mapping_owns_asidpool: + "\ pool_for_asid asid s = Some p; kheap s p = Some (ArchObj (ASIDPool pool)); + pool r = Some entry; ap_vspace entry = p'; pas_refined aag s; is_subject aag p'; + pas_wellformed (aag\pasSubject := (pasObjectAbs aag p)\) \ + \ is_subject aag p" + apply (frule asid_pool_into_aag) + apply assumption+ + apply (drule pas_wellformed_pasSubject_update_Control) + apply assumption + apply simp + done + +lemma partitionIntegrity_subjectAffects_aobj': + "\ pool_for_asid asid s = Some x; kheap s x = Some (ArchObj ao); ao \ ao'; + pas_refined aag s; silc_inv aag st s; pas_wellformed_noninterference aag; + arch_integrity_obj_atomic (aag\pasMayActivate := False, pasMayEditReadyQueues := False\) + {pasSubject aag} (pasObjectAbs aag x) ao ao' \ + \ subject_can_affect_label_directly aag (pasObjectAbs aag x)" + unfolding arch_integrity_obj_atomic.simps asid_pool_integrity_def + sorry (* FIXME AARCH64 IF: vmid may change *) +(* + apply clarsimp + apply (rule ccontr) + apply (drule fun_noteqD) + apply (erule exE, rename_tac r) + apply (drule_tac x=r in spec) + apply (clarsimp dest!: not_sym[where t=None]) + apply 
(subgoal_tac "is_subject aag x", force intro: affects_lrefl) + apply (frule (1) aag_Control_into_owns) + apply (frule (3) asid_pool_into_aag) + apply simp + apply (frule (1) pas_wellformed_noninterference_control_to_eq) + apply (fastforce elim!: silc_inv_cnode_onlyE obj_atE simp: is_cap_table_def) + apply clarsimp + done +*) + +lemma partitionIntegrity_subjectAffects_aobj[Noninterference_assms]: + assumes par_inte: "partitionIntegrity aag s s'" + and "kheap s x = Some (ArchObj ao)" + "kheap s x \ kheap s' x" + "silc_inv aag st s" + "pas_refined aag s" + "pas_wellformed_noninterference aag" + notes inte_obj = par_inte[THEN partitionIntegrity_integrity, THEN integrity_subjects_obj, + THEN spec[where x=x], simplified integrity_obj_def, simplified] + shows "subject_can_affect_label_directly aag (pasObjectAbs aag x)" +proof (cases "pasObjectAbs aag x = pasSubject aag") + case True + then show ?thesis by (simp add: subjectAffects.intros(1)) +next + case False + obtain ao' where ao': "kheap s' x = Some (ArchObj ao')" + using assms False inte_obj_arch[OF inte_obj] + by (auto elim: integrity_obj_atomic.cases) + have arch_tro: + "arch_integrity_obj_atomic (aag\pasMayActivate := False, pasMayEditReadyQueues := False\) + {pasSubject aag} (pasObjectAbs aag x) ao ao'" + using assms False ao' inte_obj_arch[OF inte_obj] + by (auto elim: integrity_obj_atomic.cases) + obtain asid where asid: "pool_for_asid asid s = Some x" + using assms False inte_obj_arch[OF inte_obj] + integrity_subjects_asids[OF partitionIntegrity_integrity[OF par_inte]] + sorry (* FIXME AARCH64 IF: arch objects include VCPUs *) +(* + by (fastforce elim!: integrity_obj_atomic.cases arch_integrity_obj_atomic.cases + simp: integrity_asids_def aobjs_of_Some opt_map_def pool_for_asid_def)+ +*) + show ?thesis + using assms ao' asid arch_tro + by (fastforce dest: partitionIntegrity_subjectAffects_aobj') +qed + +term ap_vspace + +(* FIXME AARCH64 IF: modify equiv_asids *) +lemma 
partitionIntegrity_subjectAffects_asid[Noninterference_assms]: + "\ partitionIntegrity aag s s'; pas_refined aag s; valid_objs s; + valid_arch_state s; valid_arch_state s'; pas_wellformed_noninterference aag; + silc_inv aag st s'; invs s'; \ equiv_asids (\x. pasASIDAbs aag x = a) s s' \ + \ a \ subjectAffects (pasPolicy aag) (pasSubject aag)" + apply (clarsimp simp: equiv_asids_def equiv_asid_def asid_pool_at_kheap) + apply (case_tac "arm_asid_table (arch_state s) (asid_high_bits_of asid) = + arm_asid_table (arch_state s') (asid_high_bits_of asid)") + apply (clarsimp simp: valid_arch_state_def valid_asid_table_def) + apply (erule disjE) + apply (case_tac "kheap s' pool_ptr = None"; clarsimp) + apply (prop_tac "pool_ptr \ dom (asid_pools_of s')") + apply (fastforce simp: not_in_domIff asid_pools_of_ko_at obj_at_def) + apply blast + apply (case_tac "\asid_pool. y = ArchObj (ASIDPool asid_pool)"; clarsimp) + apply (prop_tac "pool_ptr \ dom (asid_pools_of s)") + apply (fastforce simp: not_in_domIff asid_pools_of_ko_at obj_at_def) + apply blast + apply (prop_tac "pool_ptr \ dom (asid_pools_of s')") + apply (fastforce simp: not_in_domIff asid_pools_of_ko_at obj_at_def) + apply blast + apply clarsimp + apply (drule partitionIntegrity_integrity) + apply (drule integrity_subjects_obj) + apply (drule_tac x="pool_ptr" in spec)+ + apply (clarsimp simp: asid_pools_of_ko_at obj_at_def) + apply (case_tac "map_option ap_vspace (asid_pool' (asid_low_bits_of asid)) \ + map_option ap_vspace (asid_pool (asid_low_bits_of asid))") + apply (rule affects_asidpool_map) + apply (rule pas_refined_asid_mem) + apply (drule tro_tro_alt, erule integrity_obj_alt.cases; simp) + apply (drule_tac t="pasSubject aag" in sym) + apply simp + apply (rule sata_asidpool) + apply assumption + apply assumption + apply (clarsimp simp: arch_integrity_obj_alt.simps asid_pool_integrity_def) + apply (drule_tac x="asid_low_bits_of asid" in spec)+ + apply clarsimp + apply (drule owns_mapping_owns_asidpool[rotated]) 
+ apply ((simp | blast intro: pas_refined_Control[THEN sym] + | fastforce simp: pool_for_asid_def + intro: pas_wellformed_pasSubject_update[simplified])+)[6] + apply (drule_tac t="pasSubject aag" in sym)+ + apply simp + apply (rule sata_asidpool) + apply assumption + apply assumption + apply assumption + subgoal sorry (* FIXME AARCH64 IF: vmid may change *) + apply clarsimp + apply (drule partitionIntegrity_integrity) + apply (clarsimp simp: integrity_def integrity_asids_def) + apply (drule_tac x=asid in spec)+ + apply (fastforce intro: affects_lrefl) + done + +(* clagged mostly from Scheduler_IF.dmo_storeWord_reads_respects_scheduler *) +lemma dmo_storeWord_reads_respects_g[Noninterference_assms, wp]: + "reads_respects_g aag l \ (do_machine_op (storeWord ptr w))" + apply (clarsimp simp: do_machine_op_def bind_def gets_def get_def return_def fail_def + select_f_def storeWord_def assert_def simpler_modify_def) + apply (fold simpler_modify_def) + apply (intro impI conjI) + apply (rule ev_modify) + apply (rule conjI) + apply (fastforce simp: reads_equiv_g_def globals_equiv_def reads_equiv_def2 states_equiv_for_def + equiv_for_def equiv_asids_def equiv_asid_def silc_dom_equiv_def upto.simps) + apply (rule affects_equiv_machine_state_update, assumption) + apply (fastforce simp: equiv_for_def affects_equiv_def states_equiv_for_def upto.simps) + apply (simp add: equiv_valid_def2 equiv_valid_2_def) + done + +lemma set_vm_root_reads_respects: + "reads_respects aag l \ (set_vm_root tcb)" + by (rule reads_respects_unobservable_unit_return) wp+ + +lemmas set_vm_root_reads_respects_g[wp] = + reads_respects_g[OF set_vm_root_reads_respects, + OF doesnt_touch_globalsI[where P="valid_global_arch_objs"], + simplified, OF set_vm_root_globals_equiv] + +lemma arch_switch_to_thread_reads_respects_g'[Noninterference_assms]: + "equiv_valid (reads_equiv_g aag) (affects_equiv aag l) + (\s s'. 
affects_equiv aag l s s' \ + arch_globals_equiv_strengthener (machine_state s) (machine_state s')) + (\s. is_subject aag t) + (arch_switch_to_thread t)" + apply (simp add: arch_switch_to_thread_def) + apply (rule equiv_valid_guard_imp) + apply (wp bind_ev_general thread_get_reads_respects_g | simp)+ + sorry + +(* consider rewriting the return-value assumption using equiv_valid_rv_inv *) +lemma ev2_invisible'[Noninterference_assms]: + assumes domains_distinct: "pas_domains_distinct aag" + shows + "\ labels_are_invisible aag l L; labels_are_invisible aag l L'; + modifies_at_most aag L Q f; modifies_at_most aag L' Q' g; + doesnt_touch_globals Q f; doesnt_touch_globals Q' g; + \st :: det_state. f \\s. arch_globals_equiv_strengthener (machine_state st) (machine_state s)\; + \st :: det_state. g \\s. arch_globals_equiv_strengthener (machine_state st) (machine_state s)\; + \s t. P s \ P' t \ (\(rva,s') \ fst (f s). \(rvb,t') \ fst (g t). W rva rvb) \ + \ equiv_valid_2 (reads_equiv_g aag) + (\s s'. affects_equiv aag l s s' \ + arch_globals_equiv_strengthener (machine_state s) (machine_state s')) + (\s s'. 
affects_equiv aag l s s' \ + arch_globals_equiv_strengthener (machine_state s) (machine_state s')) + W (P and Q) (P' and Q') f g" + apply (clarsimp simp: equiv_valid_2_def) + apply (rule conjI) + apply blast + apply (drule_tac s=s in modifies_at_mostD, assumption+) + apply (drule_tac s=t in modifies_at_mostD, assumption+) + apply (drule_tac s=s in globals_equivI, assumption+) + apply (drule_tac s=t in globals_equivI, assumption+) + apply (frule (1) equiv_but_for_reads_equiv[OF domains_distinct]) + apply (frule_tac s=t in equiv_but_for_reads_equiv[OF domains_distinct], assumption) + apply (drule (1) equiv_but_for_affects_equiv[OF domains_distinct]) + apply (drule_tac s=t in equiv_but_for_affects_equiv[OF domains_distinct], assumption) + apply (clarsimp simp: reads_equiv_g_def) + apply (blast intro: reads_equiv_trans reads_equiv_sym affects_equiv_trans + affects_equiv_sym globals_equiv_trans globals_equiv_sym) + done + +lemmas dmo_mol_reads_respects_g[wp] = + reads_respects_g[OF dmo_mol_reads_respects, + OF doesnt_touch_globalsI[where P="\"], + simplified, + OF dmo_mol_globals_equiv] + +lemma set_global_user_vspace_reads_respects_g[Noninterference_assms, wp]: + "reads_respects_g aag l \ (set_global_user_vspace)" + unfolding set_global_user_vspace_def setVSpaceRoot_def + apply wpsimp + apply (clarsimp simp: reads_equiv_g_def globals_equiv_def) + done + +lemma arch_switch_to_idle_thread_reads_respects_g[Noninterference_assms, wp]: + "reads_respects_g aag l \ (arch_switch_to_idle_thread)" + apply (simp add: arch_switch_to_idle_thread_def) + apply wp + sorry +(* + apply (clarsimp simp: reads_equiv_g_def globals_equiv_idle_thread_ptr) + done +*) + +lemma arch_globals_equiv_threads_eq[Noninterference_assms]: + "arch_globals_equiv t' t'' kh kh' as as' ms ms' + \ arch_globals_equiv t t kh kh' as as' ms ms'" + by clarsimp + +lemma arch_globals_equiv_globals_equiv_scheduler[Noninterference_assms, elim]: + "arch_globals_equiv (cur_thread t) (idle_thread s) (kheap s) (kheap t) + 
(arch_state s) (arch_state t) (machine_state s) (machine_state t) + \ arch_globals_equiv_scheduler (kheap s) (kheap t) (arch_state s) (arch_state t)" + by (auto simp: arch_globals_equiv_scheduler_def) + +lemma getActiveIRQ_ret_no_dmo[Noninterference_assms, wp]: + "\\\ getActiveIRQ in_kernel \\rv s. \x. rv = Some x \ x \ maxIRQ\" + apply (simp add: getActiveIRQ_def) + apply (rule hoare_pre) + apply (insert irq_oracle_max_irq) + apply (wp dmo_getActiveIRQ_irq_masks) + apply clarsimp + done + +(*FIXME: Move to scheduler_if*) +lemma dmo_getActive_IRQ_reads_respect_scheduler[Noninterference_assms]: + "reads_respects_scheduler aag l (\s. irq_masks_of_state st = irq_masks_of_state s) + (do_machine_op (getActiveIRQ in_kernel))" + apply (simp add: getActiveIRQ_def) + apply (simp add: dmo_distr dmo_if_distr dmo_gets_distr dmo_modify_distr cong: if_cong) + apply wp + apply (rule ev_modify[where P=\]) + apply (clarsimp simp: scheduler_equiv_def domain_fields_equiv_def globals_equiv_scheduler_def + scheduler_affects_equiv_def arch_scheduler_affects_equiv_def + states_equiv_for_def equiv_for_def equiv_asids_def equiv_asid_def + scheduler_globals_frame_equiv_def silc_dom_equiv_def idle_equiv_def) + apply wp + apply (rule_tac P="\s. 
irq_masks_of_state st = irq_masks_of_state s" in gets_ev') + apply wp + apply clarsimp + apply (simp add: scheduler_equiv_def) + done + +lemma integrity_hyp_update_reference_state[Noninterference_assms]: + "is_subject aag t + \ integrity_hyp aag {pasSubject aag} x s (s\kheap := (kheap s)(t \ blah)\)" + by (auto simp: integrity_hyp_def vcpu_integrity_def vcpu_of_state_def opt_map_def) + +lemma integrity_fpu_update_reference_state[Noninterference_assms]: + "is_subject aag t + \ integrity_fpu aag {pasSubject aag} x s (s\kheap := (kheap s)(t \ blah)\)" + by (auto simp: integrity_fpu_def fpu_of_state_def) + +(* FIXME AARCH64 IF: not true *) +lemma valid_cur_hyp_triv[Noninterference_assms]: + "valid_cur_hyp s" + sorry + +(* FIXME AARCH64 IF: not true *) +lemma arch_tcb_get_registers_equality[Noninterference_assms]: + "arch_tcb_get_registers (tcb_arch tcb) = arch_tcb_get_registers (tcb_arch tcb') + \ tcb_arch tcb = tcb_arch tcb'" + sorry +(* by (auto simp: arch_tcb_get_registers_def intro: arch_tcb.equality user_context.expand) *) + + +definition irq_at' :: "bool \ nat \ (irq \ bool) \ irq option" where + "irq_at' in_kernel pos masks \ + let i = irq_oracle pos in (if masks i \ in_kernel \ i \ non_kernel_IRQs then None else Some i)" + +lemma dmo_getActiveIRQ_wp': + "\\s. P (irq_at' in_kernel (irq_state (machine_state s) + 1) (irq_masks (machine_state s))) + (s\machine_state := (machine_state s\irq_state := irq_state (machine_state s) + 1\)\)\ + do_machine_op (getActiveIRQ in_kernel) + \P\" + apply (simp add: do_machine_op_def getActiveIRQ_def non_kernel_IRQs_def) + apply (wp modify_wp | wpc)+ + apply clarsimp + apply (erule use_valid) + apply (wp modify_wp) + apply (auto simp: Let_def non_kernel_IRQs_def irq_at'_def split: if_splits) + done + +lemma getActiveIRQ_ev2[Noninterference_assms]: + "equiv_valid_2 (scheduler_equiv aag) + (scheduler_affects_equiv aag l) (scheduler_affects_equiv aag l) + (\irq irq'. irq = irq' \ irq = None \ irq' \ Some ` non_kernel_IRQs) + (\s. 
irq_masks_of_state st = irq_masks_of_state s) + (\s. irq_masks_of_state st = irq_masks_of_state s) + (do_machine_op (getActiveIRQ True)) (do_machine_op (getActiveIRQ False))" + apply (clarsimp simp: equiv_valid_def2 equiv_valid_2_def) + apply (erule use_valid, rule dmo_getActiveIRQ_wp')+ + apply (intro conjI) + apply (clarsimp simp: scheduler_equiv_def irq_at'_def Let_def) + apply clarsimp + apply (clarsimp simp: scheduler_equiv_def domain_fields_equiv_def globals_equiv_scheduler_def + silc_dom_equiv_def equiv_for_def) + apply (clarsimp simp: scheduler_affects_equiv_def) + apply (intro conjI impI) + apply (clarsimp simp: states_equiv_for_def equiv_for_def equiv_asids_def) + apply (clarsimp simp: scheduler_globals_frame_equiv_def) + apply (clarsimp simp: arch_scheduler_affects_equiv_def) + done + +lemma non_kernel_IRQs_le_maxIRQ[Noninterference_assms]: + "irq \ non_kernel_IRQs \ irq \ maxIRQ" + unfolding non_kernel_IRQs_def Kernel_Config.maxIRQ_def irqVGICMaintenance_def irqVTimerEvent_def + by auto + +end + + +requalify_consts AARCH64.arch_globals_equiv_strengthener +requalify_facts AARCH64.arch_globals_equiv_strengthener_thread_independent + + +global_interpretation Noninterference_1?: Noninterference_1 _ arch_globals_equiv_strengthener +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Noninterference_assms)?) +qed + + +sublocale valid_initial_state \ valid_initial_state?: + Noninterference_valid_initial_state arch_globals_equiv_strengthener .. 
+ +end diff --git a/proof/infoflow/AARCH64/ArchPasUpdates.thy b/proof/infoflow/AARCH64/ArchPasUpdates.thy new file mode 100644 index 0000000000..0ce6bf4321 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchPasUpdates.thy @@ -0,0 +1,119 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchPasUpdates +imports PasUpdates +begin + +context Arch begin + +named_theorems PasUpdates_assms + +crunch arch_post_cap_deletion, arch_finalise_cap, prepare_thread_delete + for domain_fields[PasUpdates_assms, wp]: "domain_fields P" + ( wp: syscall_valid crunch_wps rec_del_preservation cap_revoke_preservation modify_wp + simp: crunch_simps check_cap_at_def filterM_mapM unless_def + ignore: without_preemption filterM rec_del check_cap_at cap_revoke + ignore_del: create_cap_ext cap_insert_ext cap_move_ext + empty_slot_ext cap_swap_ext set_thread_state_act tcb_sched_action reschedule_required) + +end + + +global_interpretation PasUpdates_1?: PasUpdates_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact PasUpdates_assms)?) 
+qed + + +context Arch begin + +crunch arch_perform_invocation, arch_post_modify_registers, init_arch_objects, + arch_invoke_irq_control, arch_invoke_irq_handler, handle_arch_fault_reply, + arch_prepare_set_domain, arch_post_set_flags + for domain_fields[PasUpdates_assms, wp]: "domain_fields P" + (wp: syscall_valid crunch_wps mapME_x_inv_wp + simp: crunch_simps check_cap_at_def detype_def mapM_x_defsym + ignore: check_cap_at syscall + ignore_del: set_domain set_priority possible_switch_to + rule: transfer_caps_loop_pres) + +lemma state_asids_to_policy_aux_pasSubject_update: + "state_asids_to_policy_aux (aag\pasSubject := x\) caps asid vrefs = + state_asids_to_policy_aux aag caps asid vrefs" + apply (rule equalityI) + apply clarify + apply (erule state_asids_to_policy_aux.cases + | simp + | fastforce intro: state_asids_to_policy_aux.intros)+ + apply clarify + apply (erule state_asids_to_policy_aux.cases) + apply (simp, subst pasObjectAbs_pasSubject_update[symmetric] + , subst pasASIDAbs_pasSubject_update[symmetric] + , rule state_asids_to_policy_aux.intros + , assumption+)+ + done + +lemma state_asids_to_policy_pasSubject_update[PasUpdates_assms]: + "state_asids_to_policy (aag\pasSubject := x\) s = + state_asids_to_policy aag s" + by (simp add: state_asids_to_policy_aux_pasSubject_update) + +lemma state_asids_to_policy_aux_pasMayActivate_update: + "state_asids_to_policy_aux (aag\pasMayActivate := x\) caps asid_tab vrefs = + state_asids_to_policy_aux aag caps asid_tab vrefs" + apply (rule equalityI) + apply clarify + apply (erule state_asids_to_policy_aux.cases + | simp + | fastforce intro: state_asids_to_policy_aux.intros)+ + apply clarify + apply (erule state_asids_to_policy_aux.cases) + apply (simp, subst pasObjectAbs_pasMayActivate_update[symmetric] + , subst pasASIDAbs_pasMayActivate_update[symmetric] + , rule state_asids_to_policy_aux.intros + , assumption+)+ + done + +lemma state_asids_to_policy_pasMayActivate_update[PasUpdates_assms]: + "state_asids_to_policy 
(aag\pasMayActivate := x\) s = + state_asids_to_policy aag s" + by (simp add: state_asids_to_policy_aux_pasMayActivate_update) + +lemma state_asids_to_policy_aux_pasMayEditReadyQueues_update: + "state_asids_to_policy_aux (aag\pasMayEditReadyQueues := x\) caps asid_tab vrefs = + state_asids_to_policy_aux aag caps asid_tab vrefs" + apply (rule equalityI) + apply (clarify) + apply (erule state_asids_to_policy_aux.cases + | simp + | fastforce intro: state_asids_to_policy_aux.intros)+ + apply (clarify) + apply (erule state_asids_to_policy_aux.cases) + apply (simp, subst pasObjectAbs_pasMayEditReadyQueues_update[symmetric] + , subst pasASIDAbs_pasMayEditReadyQueues_update[symmetric] + , rule state_asids_to_policy_aux.intros + , assumption+)+ + done + +lemma state_asids_to_policy_pasMayEditReadyQueues_update[PasUpdates_assms]: + "state_asids_to_policy (aag\pasMayEditReadyQueues := x\) s = + state_asids_to_policy aag s" + by (simp add: state_asids_to_policy_aux_pasMayEditReadyQueues_update) + +end + + +global_interpretation PasUpdates_2?: PasUpdates_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact PasUpdates_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchRetype_IF.thy b/proof/infoflow/AARCH64/ArchRetype_IF.thy new file mode 100644 index 0000000000..3d20ec3251 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchRetype_IF.thy @@ -0,0 +1,637 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchRetype_IF +imports Retype_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Retype_IF_assms + +lemma do_ipc_transfer_valid_arch_no_caps[wp]: + "do_ipc_transfer s ep bg grt r \valid_arch_state\" + by (wpsimp wp: valid_arch_state_lift_aobj_at_no_caps do_ipc_transfer_aobj_at) + +lemma create_cap_valid_arch_state_no_caps[wp]: + "\valid_arch_state \ create_cap tp sz p dev ref + \\rv. 
valid_arch_state\" + by (wp valid_arch_state_lift_aobj_at_no_caps create_cap_aobj_at) + +lemma modify_underlying_memory_update_0_ev: + "equiv_valid_inv (equiv_machine_state P) (equiv_machine_state Q) \ + (modify (underlying_memory_update (\m. m(x := word_rsplit 0 ! 7, + x + 1 := word_rsplit 0 ! 6, + x + 2 := word_rsplit 0 ! 5, + x + 3 := word_rsplit 0 ! 4, + x + 4 := word_rsplit 0 ! 3, + x + 5 := word_rsplit 0 ! 2, + x + 6 := word_rsplit 0 ! Suc 0, + x + 7 := word_rsplit 0 ! 0))))" + by (fastforce simp: equiv_valid_def2 equiv_valid_2_def in_monad + elim: equiv_forE + intro: equiv_forI) + +lemma storeWord_ev: + "equiv_valid_inv (equiv_machine_state P) (equiv_machine_state Q) \ (storeWord x 0)" + unfolding storeWord_def + by (wp modify_underlying_memory_update_0_ev assert_inv | simp add: no_irq_def upto.simps comp_def)+ + +lemma clearMemory_ev[Retype_IF_assms]: + "equiv_valid_inv (equiv_machine_state P) (equiv_machine_state Q) (\_. True) (clearMemory ptr bits)" + unfolding clearMemory_def + apply (rule equiv_valid_guard_imp) + apply (rule mapM_x_ev[OF storeWord_ev]) + apply (rule wp_post_taut | simp)+ + done + +lemma freeMemory_ev[Retype_IF_assms]: + "equiv_valid_inv (equiv_machine_state P) (equiv_machine_state Q) (\_. True) (freeMemory ptr bits)" + unfolding freeMemory_def + apply (rule equiv_valid_guard_imp) + apply (rule mapM_x_ev[OF storeWord_ev]) + apply (rule wp_post_taut | simp)+ + done + +lemma set_pt_globals_equiv: + "\globals_equiv st and (\s. a \ global_pt s)\ + set_pt a b + \\_. 
globals_equiv st\" + apply (unfold set_pt_def gets_map_def) + apply (subst gets_apply) + apply (wpsimp wp: gets_apply_ev set_object_globals_equiv) + apply (fastforce elim: reads_equivE equiv_forE simp: opt_map_def) + done + +lemma set_pt_reads_respects: + "reads_respects aag l (K (is_subject aag a)) (set_pt a b)" + apply (unfold set_pt_def gets_map_def) + apply (subst gets_apply) + apply (wpsimp wp: gets_apply_ev set_object_reads_respects) + apply (fastforce elim: reads_equivE equiv_forE simp: opt_map_def) + done + +lemma set_pt_reads_respects_g: + "reads_respects_g aag l (\ s. is_subject aag ptr \ ptr \ global_pt s) (set_pt ptr pt)" + by (fastforce intro: equiv_valid_guard_imp[OF reads_respects_g] doesnt_touch_globalsI + set_pt_reads_respects set_pt_globals_equiv) + +crunch clearMemory + for irq_state[Retype_IF_assms, wp]: "\s. P (irq_state s)" + (wp: crunch_wps simp: crunch_simps storeWord_def ignore_del: clearMemory) + +crunch freeMemory + for irq_state[Retype_IF_assms, wp]: "\s. P (irq_state s)" + (wp: crunch_wps simp: crunch_simps storeWord_def) + +lemma get_pt_rev: + "reads_equiv_valid_inv A aag (K (is_subject aag ptr)) (get_pt ptr)" + apply (unfold gets_map_def) + apply (subst gets_apply) + apply (wpsimp wp: gets_apply_ev) + apply (fastforce elim: reads_equivE equiv_forE simp: opt_map_def) + done + +lemma get_pt_revg: + "reads_equiv_valid_g_inv A aag (\ s. ptr = arm_us_global_vspace (arch_state s)) (get_pt ptr)" + apply (unfold gets_map_def) + apply (subst gets_apply) + apply (wp gets_apply_ev') + defer + apply (wp hoare_drop_imps) + apply (rule conjI) + apply assumption + apply simp + apply (auto simp: reads_equiv_g_def globals_equiv_def opt_map_def) + done + +lemma store_pte_reads_respects: + "reads_respects aag l (K (is_subject aag (table_base pt_t ptr))) (store_pte pt_t ptr pte)" + unfolding store_pte_def fun_app_def + apply (wp set_pt_reads_respects get_pt_rev) + apply (clarsimp) + done + +lemma store_pte_globals_equiv: + "\globals_equiv s and (\ s. 
table_base pt_t ptr \ arm_us_global_vspace (arch_state s))\ + store_pte pt_t ptr pde + \\_. globals_equiv s\" + unfolding store_pte_def + apply (wp set_pt_globals_equiv) + apply simp + done + +lemma store_pte_reads_respects_g: + "reads_respects_g aag l (\s. is_subject aag (table_base pt_t ptr) \ + table_base pt_t ptr \ arm_us_global_vspace (arch_state s)) + (store_pte pt_t ptr pte)" + by (fastforce intro: equiv_valid_guard_imp[OF reads_respects_g] doesnt_touch_globalsI + store_pte_reads_respects store_pte_globals_equiv) + +lemma get_pte_rev: + "reads_equiv_valid_inv A aag (K (is_subject aag (table_base pt_t ptr))) (get_pte pt_t ptr)" + unfolding gets_map_def fun_app_def + apply (subst gets_apply) + apply (wpsimp wp: gets_apply_ev) + apply (fastforce elim: reads_equivE equiv_forE simp: ptes_of_def obind_def opt_map_def) + done + +lemma get_pte_revg: + "reads_equiv_valid_g_inv A aag (\s. (table_base pt_t ptr) = arm_us_global_vspace (arch_state s)) + (get_pte pt_t ptr)" + apply (unfold gets_map_def) + apply (subst gets_apply) + apply (wp gets_apply_ev') + defer + apply (wp hoare_drop_imps) + apply (rule conjI) + apply assumption + apply simp + apply (auto simp: reads_equiv_g_def globals_equiv_def opt_map_def ptes_of_def obind_def) + done + +lemma dmo_no_mem_globals_equiv: + "\ \P. f \\ms. P (underlying_memory ms)\; + \P. f \\ms. P (device_state ms)\; + \P. f \\ms. P (exclusive_state ms)\ \ + \ do_machine_op f \globals_equiv s\" + unfolding do_machine_op_def + apply (wp | simp add: split_def)+ + apply atomize + apply (erule_tac x="(=) (underlying_memory (machine_state sa))" in allE) + apply (erule_tac x="(=) (device_state (machine_state sa))" in allE) + apply (fastforce simp: valid_def globals_equiv_def idle_equiv_def) + done + +lemma dmo_mol_globals_equiv[wp]: + "do_machine_op (machine_op_lift f) \globals_equiv s\" + by (wpsimp wp: dmo_no_mem_globals_equiv simp: machine_op_lift_def machine_rest_lift_def) + +lemma mol_globals_equiv: + "machine_op_lift mop \\ms. 
globals_equiv st (s\machine_state := ms\)\" + unfolding machine_op_lift_def + apply (simp add: machine_rest_lift_def split_def) + apply wp + apply (clarsimp simp: globals_equiv_def idle_equiv_def) + done + +lemma storeWord_globals_equiv: + "storeWord p v \\ms. globals_equiv st (s\machine_state := ms\)\" + unfolding storeWord_def + apply (simp add: is_aligned_mask[symmetric]) + apply wp + apply (clarsimp simp: globals_equiv_def idle_equiv_def) + done + +lemma dmo_clearMemory_globals_equiv[Retype_IF_assms]: + "do_machine_op (clearMemory ptr (2 ^ bits)) \globals_equiv s\" + apply (simp add: do_machine_op_def clearMemory_def split_def) + apply wpsimp + apply (erule use_valid) + by (wpsimp wp: mapM_x_wp' storeWord_globals_equiv mol_globals_equiv)+ + +lemma dmo_freeMemory_globals_equiv[Retype_IF_assms]: + "do_machine_op (freeMemory ptr bits) \globals_equiv s\" + apply (rule hoare_pre) + apply (simp add: do_machine_op_def freeMemory_def split_def) + apply (wp) + apply clarsimp + apply (erule use_valid) + apply (wp mapM_x_wp' storeWord_globals_equiv mol_globals_equiv) + apply (simp_all) + done + +lemma init_arch_objects_reads_respects_g: + "reads_respects_g aag l \ (init_arch_objects new_type dev ptr num_objects obj_sz refs)" + unfolding init_arch_objects_def cleanCacheRange_RAM_def cleanCacheRange_PoU_def + by (wpsimp wp: reads_respects_g dmo_mol_reads_respects doesnt_touch_globalsI mapM_x_ev mapM_x_wp_inv) + +(* FIXME: cleanup this proof *) +lemma retype_region_globals_equiv[Retype_IF_assms]: + notes blah[simp del] = atLeastAtMost_iff atLeastatMost_subset_iff atLeastLessThan_iff + Int_atLeastAtMost atLeastatMost_empty_iff split_paired_Ex + shows + "\globals_equiv s and invs + and (\s. \i. cte_wp_at (\c. c = UntypedCap dev (p && ~~ mask sz) sz i) slot s \ + (i \ unat (p && mask sz) \ pspace_no_overlap_range_cover p sz s)) + and K (range_cover p sz (obj_bits_api type o_bits) num \ 0 < num)\ + retype_region p num o_bits type dev + \\_. 
globals_equiv s\" + apply (simp only: retype_region_def foldr_upd_app_if fun_app_def K_bind_def) + apply (wp |simp)+ + apply clarsimp + apply (simp only: globals_equiv_def) + apply (clarsimp split del: if_split) + apply (subgoal_tac "pspace_no_overlap_range_cover p sz sa") + apply (rule conjI) + apply (clarsimp simp: pspace_no_overlap_def) + apply (drule_tac x="arm_us_global_vspace (arch_state sa)" in spec) + apply (frule valid_global_arch_objs_pt_at[OF invs_valid_global_arch_objs]) + apply (clarsimp simp: invs_def valid_state_def valid_global_objs_def + valid_vso_at_def obj_at_def ptr_add_def) + apply (frule_tac p=x in range_cover_subset) + apply (simp add: blah) + apply simp + apply (frule range_cover_subset') + apply simp + apply (clarsimp simp: p_assoc_help) + apply (drule disjoint_subset_neg1[OF _ subset_thing], rule is_aligned_no_wrap') + apply (clarsimp simp: valid_pspace_def pspace_aligned_def) + apply (drule_tac x="arm_us_global_vspace (arch_state sa)" and A="dom (kheap sa)" in bspec) + apply (simp add: domI) + apply simp + apply (rule word_power_less_1) + apply (simp add: table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def) + apply simp + apply simp + apply simp + apply (drule (1) subset_trans) + apply (erule_tac P="a \ b" for a b in notE) + apply (erule_tac A="{p + c..d}" for c d in subsetD) + apply (simp add: blah) + apply (rule is_aligned_no_wrap') + apply (rule is_aligned_add[OF _ is_aligned_mult_triv2]) + apply (simp add: range_cover_def) + apply (rule word_power_less_1) + apply (simp add: range_cover_def) + apply (erule updates_not_idle) + apply (clarsimp simp: pspace_no_overlap_def) + apply (drule_tac x="idle_thread sa" in spec) + apply (clarsimp simp: invs_def valid_state_def valid_global_objs_def + obj_at_def ptr_add_def valid_idle_def pred_tcb_at_def) + apply (frule_tac p=a in range_cover_subset) + apply (simp add: blah) + apply simp + apply (frule range_cover_subset') + apply simp + apply (clarsimp simp: p_assoc_help) + apply 
(drule disjoint_subset_neg1[OF _ subset_thing], rule is_aligned_no_wrap') + apply (clarsimp simp: valid_pspace_def pspace_aligned_def) + apply (drule_tac x="idle_thread sa" and A="dom (kheap sa)" in bspec) + apply (simp add: domI) + apply simp + apply uint_arith + apply simp+ + apply (drule (1) subset_trans) + apply (erule_tac P="a \ b" for a b in notE) + apply (erule_tac A="{idle_thread_ptr..d}" for d in subsetD) + apply (simp add: blah) + apply (erule_tac t=idle_thread_ptr in subst) + apply (rule is_aligned_no_wrap') + apply (rule is_aligned_add[OF _ is_aligned_mult_triv2]) + apply (simp add: range_cover_def)+ + apply (auto intro!: cte_wp_at_pspace_no_overlapI simp: range_cover_def word_bits_def)[1] + done + +lemma no_irq_freeMemory[Retype_IF_assms]: + "no_irq (freeMemory ptr sz)" + apply (simp add: freeMemory_def) + apply (wp no_irq_mapM_x no_irq_storeWord) + done + +lemma equiv_asid_detype[Retype_IF_assms]: + "equiv_asid asid s s' \ equiv_asid asid (detype N s) (detype N s')" + by (auto simp: equiv_asid_def) + +end + + +global_interpretation Retype_IF_1?: Retype_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Retype_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma detype_globals_equiv: + "\globals_equiv st and ((\s. arm_us_global_vspace (arch_state s) \ S) and (\s. idle_thread s \ S))\ + modify (detype S) + \\_. globals_equiv st\" + apply (wp) + apply (clarsimp simp: globals_equiv_def detype_def idle_equiv_def tcb_at_def2) + done + +lemma detype_reads_respects_g: + "reads_respects_g aag l ((\s. arm_us_global_vspace (arch_state s) \ S) and (\s. idle_thread s \ S)) + (modify (detype S))" + apply (rule equiv_valid_guard_imp) + apply (rule reads_respects_g) + apply (rule detype_reads_respects) + apply (rule doesnt_touch_globalsI[OF detype_globals_equiv]) + apply simp + done + +lemma delete_objects_reads_respects_g: + "reads_equiv_valid_g_inv (affects_equiv aag l) aag + (\s. 
arm_us_global_vspace (arch_state s) \ ptr_range p b \ + idle_thread s \ ptr_range p b \ + is_aligned p b \ 2 \ b \ b < word_bits) + (delete_objects p b)" + apply (simp add: delete_objects_def2) + apply (rule equiv_valid_guard_imp) + apply (wp dmo_freeMemory_reads_respects_g) + apply (rule detype_reads_respects_g) + apply wp + apply (unfold ptr_range_def) + apply simp + done + +lemma reset_untyped_cap_reads_respects_g: + "reads_equiv_valid_g_inv (affects_equiv aag (l :: 'a subject_label)) aag + (\s. cte_wp_at is_untyped_cap slot s \ invs s \ ct_active s \ only_timer_irq_inv irq st s \ + is_subject aag (fst slot) \ (descendants_of slot (cdt s) = {})) + (reset_untyped_cap slot)" + apply (simp add: reset_untyped_cap_def cong: if_cong) + apply (rule equiv_valid_guard_imp) + apply (wp set_cap_reads_respects_g dmo_clearMemory_reads_respects_g + | simp add: unless_def when_def split del: if_split)+ + apply (rule_tac I="invs and cte_wp_at (\cp. is_untyped_cap rv + \ (\idx. cp = free_index_update (\_. idx) rv) + \ free_index_of rv \ 2 ^ (bits_of rv) + \ is_subject aag (fst slot)) slot + and pspace_no_overlap (untyped_range rv) + and only_timer_irq_inv irq st + and (\s. 
descendants_of slot (cdt s) = {})" in mapME_x_ev) + apply (rule equiv_valid_guard_imp) + apply wp + apply (rule reads_respects_g_from_inv) + apply (rule preemption_point_reads_respects[where irq=irq and st=st]) + apply ((wp preemption_point_inv set_cap_reads_respects_g set_untyped_cap_invs_simple + only_timer_irq_inv_pres[where Q=\, OF _ set_cap_domain_sep_inv] + dmo_clearMemory_reads_respects_g + | simp)+) + apply (strengthen empty_descendants_range_in) + apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory + | simp | wp (once) dmo_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps + free_index_of_def invs_valid_global_objs) + apply (simp add: aligned_add_aligned is_aligned_shiftl) + apply (clarsimp simp: Kernel_Config.resetChunkBits_def) + apply (rule hoare_pre) + apply (wp preemption_point_inv' set_untyped_cap_invs_simple set_cap_cte_wp_at + set_cap_no_overlap only_timer_irq_inv_pres[where Q=\, OF _ set_cap_domain_sep_inv] + irq_state_independent_A_conjI + | simp)+ + apply (strengthen empty_descendants_range_in) + apply (wp only_timer_irq_inv_pres[where P=\ and Q=\] no_irq_clearMemory + | simp | wp (once) dmo_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps bits_of_def) + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps free_index_of_def) + apply (wp | simp)+ + apply (wp delete_objects_reads_respects_g) + apply (simp add: if_apply_def2) + apply (strengthen invs_valid_global_objs) + apply (wp add: delete_objects_invs_ex hoare_vcg_const_imp_lift + delete_objects_pspace_no_overlap_again + only_timer_irq_inv_pres[where P=\ and Q=\] + delete_objects_valid_arch_state + del: Untyped_AI.delete_objects_pspace_no_overlap + | simp)+ + apply (rule get_cap_reads_respects_g) + apply (wp get_cap_wp) + apply (clarsimp simp: cte_wp_at_caps_of_state 
is_cap_simps bits_of_def) + apply (frule(1) caps_of_state_valid) + apply (clarsimp simp: valid_cap_simps cap_aligned_def field_simps + free_index_of_def invs_valid_global_objs) + apply (frule valid_global_refsD2, clarsimp+) + apply (clarsimp simp: ptr_range_def[symmetric] global_refs_def descendants_range_def2) + apply (frule if_unsafe_then_capD[OF caps_of_state_cteD], clarsimp+) + apply (strengthen refl[where t=True] refl ex_tupleI[where t=slot] empty_descendants_range_in + | clarsimp)+ + apply (drule ex_cte_cap_protects[OF _ _ _ _ order_refl], erule caps_of_state_cteD) + apply (clarsimp simp: descendants_range_def2 empty_descendants_range_in) + apply clarsimp+ + apply (fastforce dest: invs_valid_global_arch_objs + simp: untyped_min_bits_def ptr_range_def) + done + +(* FIXME AARCH64 IF: delete if unused +lemma retype_region_ret_pt_aligned: + "\K (range_cover ptr sz (obj_bits_api tp us) num_objects)\ + retype_region ptr num_objects us tp dev + \\rv. K (\ref \ set rv. tp = ArchObject PageTableObj \ is_aligned ref pt_bits)\" + apply (rule hoare_strengthen_post) + apply (rule hoare_weaken_pre) + apply (rule retype_region_aligned_for_init) + apply simp + apply (clarsimp simp: obj_bits_api_def default_arch_object_def pt_bits_def pageBits_def) + done + *) + +crunch init_arch_objects + for valid_global_objs[wp]: valid_global_objs + (wp: crunch_wps) + +lemma post_retype_invs_valid_global_objsI: + "post_retype_invs ty rv s \ valid_global_objs s" + by (clarsimp simp: post_retype_invs_def invs_def valid_state_def split: if_split_asm) + +lemma invoke_untyped_reads_respects_g_wcap[Retype_IF_assms]: + notes blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost + atLeastatMost_empty_iff split_paired_Ex + shows "reads_respects_g aag (l :: 'a subject_label) + (invs and valid_untyped_inv_wcap ui (Some (UntypedCap dev ptr sz idx)) + and only_timer_irq_inv irq st and ct_active and pas_refined 
aag + and K (authorised_untyped_inv aag ui)) + (invoke_untyped ui)" + apply (case_tac ui) + apply (rename_tac cslot_ptr reset ptr_base ptr' apiobject_type nat list dev') + apply (case_tac "\ (dev' = dev \ ptr = ptr' && ~~ mask sz)") + (* contradictory *) + apply (rule equiv_valid_guard_imp, rule_tac gen_asm_ev'[where P="\" and Q=False], simp) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (simp add: invoke_untyped_def mapM_x_def[symmetric]) + apply (wpsimp wp: mapM_x_ev'' create_cap_reads_respects_g + hoare_vcg_ball_lift init_arch_objects_reads_respects_g)+ + apply (wp retype_region_reads_respects_g[where sz=sz and slot="slot_of_untyped_inv ui"]) + apply (rule_tac Q'="\rvc s. (\x\set rvc. is_subject aag x) \ + (\x\set rvc. is_aligned x (obj_bits_api apiobject_type nat)) \ + ((0::obj_ref) < of_nat (length list)) \ + post_retype_invs apiobject_type rvc s \ + global_refs s \ set rvc = {} \ + (\x\set list. is_subject aag (fst x))" + for sz in hoare_strengthen_post) + apply (wp retype_region_ret_is_subject[where sz=sz, simplified] + retype_region_global_refs_disjoint[where sz=sz] + retype_region_aligned_for_init[where sz=sz] + retype_region_post_retype_invs_spec[where sz=sz]) + apply clarsimp + apply (fastforce simp: global_refs_def obj_bits_api_def + post_retype_invs_valid_arch_stateI + post_retype_invs_valid_global_objsI + pageBits_def default_arch_object_def + intro: post_retype_invs_pspace_alignedI + post_retype_invs_valid_arch_stateI + elim: in_set_zipE) + apply (rule set_cap_reads_respects_g) + apply simp + apply (wp hoare_vcg_ex_lift set_cap_cte_wp_at_cases + hoare_vcg_disj_lift set_cap_no_overlap + set_free_index_invs_UntypedCap + set_untyped_cap_caps_overlap_reserved + set_cap_caps_no_overlap + region_in_kernel_window_preserved) + apply (wp when_ev delete_objects_reads_respects_g hoare_vcg_disj_lift + delete_objects_pspace_no_overlap + delete_objects_descendants_range_in + delete_objects_caps_no_overlap + region_in_kernel_window_preserved + 
get_cap_reads_respects_g get_cap_wp + | simp split del: if_split)+ + apply (rule reset_untyped_cap_reads_respects_g[where irq=irq and st=st]) + apply (rule_tac P="authorised_untyped_inv aag ui \ + (\p \ ptr_range ptr sz. is_subject aag p)" in hoare_gen_asmE) + apply (rule validE_validE_R, + rule_tac E'="\\" + and Q'="\_. invs and valid_untyped_inv_wcap ui (Some (UntypedCap dev ptr sz (If reset 0 idx))) + and ct_active + and (\s. reset \ pspace_no_overlap {ptr .. ptr + 2 ^ sz - 1} s)" + in hoare_strengthen_postE) + apply (rule hoare_pre, wp whenE_wp) + apply (rule validE_validE_R, rule hoare_strengthen_postE, rule reset_untyped_cap_invs_etc) + apply (clarsimp simp only: if_True simp_thms, intro conjI, assumption+) + apply simp + apply assumption + apply clarify + apply (frule(2) invoke_untyped_proofs.intro) + apply (clarsimp simp: cte_wp_at_caps_of_state bits_of_def + free_index_of_def untyped_range_def + if_split[where P="\x. x \ unat v" for v] + split del: if_split) + apply (frule(1) valid_global_refsD2[OF _ invs_valid_global_refs]) + apply (strengthen refl) + apply (strengthen invs_valid_global_objs invs_arch_state) + apply (clarsimp simp: authorised_untyped_inv_def conj_comms invoke_untyped_proofs.simps) + apply (simp add: arg_cong[OF mask_out_sub_mask, where f="\y. 
x - y" for x] + field_simps invoke_untyped_proofs.idx_le_new_offs + invoke_untyped_proofs.idx_compare' untyped_range_def) + apply (strengthen caps_region_kernel_window_imp[mk_strg I E]) + apply (simp add: invoke_untyped_proofs.simps untyped_range_def invs_cap_refs_in_kernel_window + atLeastatMost_subset_iff[where b=x and d=x for x] + cong: conj_cong split del: if_split) + apply (intro conjI) + (* mostly clagged from Untyped_AI *) + apply (simp add: atLeastatMost_subset_iff word_and_le2) + apply (case_tac reset) + apply (clarsimp elim!: pspace_no_overlap_subset del: subsetI + simp: blah word_and_le2) + apply (drule invoke_untyped_proofs.ps_no_overlap) + apply (simp add: field_simps) + apply (simp add: Int_commute, erule disjoint_subset2[rotated]) + apply (simp add: atLeastatMost_subset_iff word_and_le2) + apply (clarsimp dest!: invoke_untyped_proofs.idx_le_new_offs) + apply (simp add: ptr_range_def) + apply (erule ball_subset, rule range_subsetI[OF _ order_refl]) + apply (simp add: word_and_le2) + apply (erule order_trans[OF invoke_untyped_proofs.subset_stuff]) + apply (simp add: atLeastatMost_subset_iff word_and_le2) + apply (drule invoke_untyped_proofs.usable_range_disjoint) + apply (clarsimp simp: field_simps mask_out_sub_mask shiftl_t2n) + apply blast + apply (clarsimp simp: cte_wp_at_caps_of_state authorised_untyped_inv_def) + apply (strengthen refl) + apply (frule(1) cap_auth_caps_of_state) + apply (simp add: aag_cap_auth_def untyped_range_def + aag_has_Control_iff_owns ptr_range_def[symmetric]) + apply (erule disjE, simp_all)[1] + done + +lemma delete_objects_globals_equiv[wp]: + "\globals_equiv st and (\s. is_aligned p b \ 2 \ b \ b < word_bits \ + arm_us_global_vspace (arch_state s) \ ptr_range p b \ + idle_thread s \ ptr_range p b)\ + delete_objects p b + \\_. 
globals_equiv st\" + apply (simp add: delete_objects_def) + apply (wp detype_globals_equiv dmo_freeMemory_globals_equiv) + apply (clarsimp simp: ptr_range_def)+ + done + +lemma reset_untyped_cap_globals_equiv: + "\globals_equiv st and invs and cte_wp_at is_untyped_cap slot + and ct_active and (\s. descendants_of slot (cdt s) = {})\ + reset_untyped_cap slot + \\_. globals_equiv st\" + apply (simp add: reset_untyped_cap_def cong: if_cong) + apply (rule hoare_pre) + apply (wp set_cap_globals_equiv dmo_clearMemory_globals_equiv + preemption_point_inv | simp add: unless_def)+ + apply (rule valid_validE) + apply (rule_tac P="cap_aligned cap \ is_untyped_cap cap" in hoare_gen_asm) + apply (rule_tac Q'="\_ s. valid_global_objs s \ valid_arch_state s \ globals_equiv st s" + in hoare_strengthen_post) + apply (rule validE_valid, rule mapME_x_wp') + apply (rule hoare_pre) + apply (wp set_cap_globals_equiv dmo_clearMemory_globals_equiv + preemption_point_inv | simp add: if_apply_def2)+ + apply (clarsimp simp: is_cap_simps ptr_range_def[symmetric] + cap_aligned_def bits_of_def free_index_of_def) + apply (clarsimp simp: Kernel_Config.resetChunkBits_def) + apply (strengthen invs_valid_global_objs invs_arch_state) + apply (wp delete_objects_invs_ex hoare_vcg_const_imp_lift get_cap_wp)+ + apply (clarsimp simp: cte_wp_at_caps_of_state descendants_range_def2 is_cap_simps bits_of_def + split del: if_split) + apply (frule caps_of_state_valid_cap, clarsimp+) + apply (clarsimp simp: valid_cap_simps cap_aligned_def untyped_min_bits_def) + apply (frule valid_global_refsD2, clarsimp+) + apply (clarsimp simp: ptr_range_def[symmetric] global_refs_def) + apply (strengthen empty_descendants_range_in) + apply (cases slot) + apply (fastforce) + done + +lemma init_arch_objects_globals_equiv[wp]: + "init_arch_objects tp dev ptr n us refs \globals_equiv st\" + unfolding init_arch_objects_def cleanCacheRange_RAM_def cleanCacheRange_PoU_def + by (wpsimp wp: mapM_x_wp_inv) + +lemma 
invoke_untyped_globals_equiv: + notes blah[simp del] = untyped_range.simps usable_untyped_range.simps atLeastAtMost_iff + atLeastatMost_subset_iff atLeastLessThan_iff Int_atLeastAtMost + atLeastatMost_empty_iff split_paired_Ex + shows "\globals_equiv st and invs and valid_untyped_inv ui and ct_active\ + invoke_untyped ui + \\_. globals_equiv st\" + apply (rule hoare_name_pre_state) + apply (rule hoare_pre, rule invoke_untyped_Q) + apply (wp create_cap_globals_equiv) + apply auto[1] + apply wpsimp + apply (rule hoare_pre, wp retype_region_globals_equiv[where slot="slot_of_untyped_inv ui"]) + apply (clarsimp simp: cte_wp_at_caps_of_state) + apply (strengthen refl) + apply simp + apply (wp set_cap_globals_equiv) + apply auto[1] + apply (wp reset_untyped_cap_globals_equiv) + apply (cases ui, clarsimp simp: cte_wp_at_caps_of_state) + done + +end + + +global_interpretation Retype_IF_2?: Retype_IF_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Retype_IF_assms)?) 
+qed + + +requalify_facts + AARCH64.reset_untyped_cap_reads_respects_g + AARCH64.reset_untyped_cap_globals_equiv + AARCH64.invoke_untyped_globals_equiv + AARCH64.storeWord_globals_equiv + +end diff --git a/proof/infoflow/AARCH64/ArchScheduler_IF.thy b/proof/infoflow/AARCH64/ArchScheduler_IF.thy new file mode 100644 index 0000000000..25892a6bd5 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchScheduler_IF.thy @@ -0,0 +1,427 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchScheduler_IF +imports Scheduler_IF + +begin + +context Arch begin global_naming AARCH64 + +named_theorems Scheduler_IF_assms + +definition arch_globals_equiv_scheduler :: "kheap \ kheap \ arch_state \ arch_state \ bool" where + "arch_globals_equiv_scheduler kh kh' as as' \ + arm_us_global_vspace as = arm_us_global_vspace as' \ kh (arm_us_global_vspace as) = kh' (arm_us_global_vspace as)" + +definition + "arch_scheduler_affects_equiv s s' \ True" + +lemma arch_globals_equiv_from_scheduler[Scheduler_IF_assms]: + "\ arch_globals_equiv_scheduler (kheap s) (kheap s') (arch_state s) (arch_state s'); + cur_thread s' \ idle_thread s \ arch_scheduler_affects_equiv s s' \ + \ arch_globals_equiv (cur_thread s') (idle_thread s) (kheap s) (kheap s') + (arch_state s) (arch_state s') (machine_state s) (machine_state s')" + by (clarsimp simp: arch_globals_equiv_scheduler_def arch_scheduler_affects_equiv_def) + +lemma arch_globals_equiv_scheduler_refl[Scheduler_IF_assms]: + "arch_globals_equiv_scheduler (kheap s) (kheap s) (arch_state s) (arch_state s)" + by (simp add: idle_equiv_refl arch_globals_equiv_scheduler_def) + +lemma arch_globals_equiv_scheduler_sym[Scheduler_IF_assms]: + "arch_globals_equiv_scheduler (kheap s) (kheap s') (arch_state s) (arch_state s') + \ arch_globals_equiv_scheduler (kheap s') (kheap s) (arch_state s') (arch_state s)" + by (auto simp: arch_globals_equiv_scheduler_def) + +lemma 
arch_globals_equiv_scheduler_trans[Scheduler_IF_assms]: + "\ arch_globals_equiv_scheduler (kheap s) (kheap s') (arch_state s) (arch_state s'); + arch_globals_equiv_scheduler (kheap s') (kheap s'') (arch_state s') (arch_state s'') \ + \ arch_globals_equiv_scheduler (kheap s) (kheap s'') (arch_state s) (arch_state s'')" + by (clarsimp simp: arch_globals_equiv_scheduler_def) + +lemma arch_scheduler_affects_equiv_trans[Scheduler_IF_assms, elim]: + "\ arch_scheduler_affects_equiv s s'; arch_scheduler_affects_equiv s' s'' \ + \ arch_scheduler_affects_equiv s s''" + by (simp add: arch_scheduler_affects_equiv_def) + +lemma arch_scheduler_affects_equiv_sym[Scheduler_IF_assms, elim]: + "arch_scheduler_affects_equiv s s' \ arch_scheduler_affects_equiv s' s" + by (simp add: arch_scheduler_affects_equiv_def) + +lemma arch_scheduler_affects_equiv_sa_update[Scheduler_IF_assms, simp]: + "arch_scheduler_affects_equiv (scheduler_action_update f s) s' = + arch_scheduler_affects_equiv s s'" + "arch_scheduler_affects_equiv s (scheduler_action_update f s') = + arch_scheduler_affects_equiv s s'" + by (auto simp: arch_scheduler_affects_equiv_def) + +lemma equiv_asid_cur_thread_update[Scheduler_IF_assms, simp]: + "equiv_asid asid (cur_thread_update f s) s' = equiv_asid asid s s'" + "equiv_asid asid s (cur_thread_update f s') = equiv_asid asid s s'" + by (auto simp: equiv_asid_def) + +lemma arch_scheduler_affects_equiv_ready_queues_update[Scheduler_IF_assms, simp]: + "arch_scheduler_affects_equiv (ready_queues_update f s) s' = arch_scheduler_affects_equiv s s'" + "arch_scheduler_affects_equiv s (ready_queues_update f s') = arch_scheduler_affects_equiv s s'" + by (auto simp: arch_scheduler_affects_equiv_def) + +crunch arch_switch_to_thread, arch_switch_to_idle_thread + for idle_thread[Scheduler_IF_assms, wp]: "\s :: det_state. 
P (idle_thread s)" + (wp: crunch_wps simp: crunch_simps) + +declare arch_prepare_next_domain_idle_thread[Scheduler_IF_assms] + +lemma arch_prepare_next_domain_kheap[Scheduler_IF_assms,wp]: + "arch_prepare_next_domain \\s :: det_state. P (kheap s)\" + sorry + +lemma arch_switch_to_thread_kheap[Scheduler_IF_assms,wp]: + "arch_switch_to_thread t \\s :: det_state. P (kheap s)\" + sorry + +lemma arch_switch_to_idle_thread_kheap[Scheduler_IF_assms,wp]: + "arch_switch_to_idle_thread \\s :: det_state. P (kheap s)\" + sorry + +crunch arch_switch_to_thread, arch_switch_to_idle_thread + for cur_domain[Scheduler_IF_assms, wp]: "\s. P (cur_domain s)" + and domain_fields[Scheduler_IF_assms, wp]: "domain_fields P" + +lemma arch_switch_to_idle_thread_globals_equiv[Scheduler_IF_assms,wp]: + "arch_switch_to_idle_thread \globals_equiv st\" + sorry + +lemma arch_switch_to_idle_thread_states_equiv_for[Scheduler_IF_assms,wp]: + "arch_switch_to_idle_thread \states_equiv_for P Q R S st\" + sorry + +lemma arch_switch_to_idle_thread_work_units_completed[Scheduler_IF_assms,wp]: + "arch_switch_to_idle_thread \\s. P (work_units_completed s)\" + by wp + +crunch arch_activate_idle_thread + for cur_domain[Scheduler_IF_assms, wp]: "\s. P (cur_domain s)" + and idle_thread[Scheduler_IF_assms, wp]: "\s. P (idle_thread s)" + and irq_state_of_state[Scheduler_IF_assms, wp]: "\s. 
P (irq_state_of_state s)" + and domain_fields[Scheduler_IF_assms, wp]: "domain_fields P" + +lemma arch_scheduler_affects_equiv_cur_thread_update[Scheduler_IF_assms, simp]: + "arch_scheduler_affects_equiv (cur_thread_update f s) s' = arch_scheduler_affects_equiv s s'" + "arch_scheduler_affects_equiv s (cur_thread_update f s') = arch_scheduler_affects_equiv s s'" + by (auto simp: arch_scheduler_affects_equiv_def) + +lemma equiv_asid_domain_time_update[Scheduler_IF_assms, simp]: + "equiv_asid asid (domain_time_update f s) s' = equiv_asid asid s s'" + "equiv_asid asid s (domain_time_update f s') = equiv_asid asid s s'" + by (auto simp: equiv_asid_def) + +lemma arch_scheduler_affects_equiv_domain_time_update[Scheduler_IF_assms, simp]: + "arch_scheduler_affects_equiv (domain_time_update f s) s' = arch_scheduler_affects_equiv s s'" + "arch_scheduler_affects_equiv s (domain_time_update f s') = arch_scheduler_affects_equiv s s'" + by (auto simp: arch_scheduler_affects_equiv_def) + +crunch ackInterrupt + for irq_state[Scheduler_IF_assms, wp]: "\s. P (irq_state s)" + +lemma thread_set_context_globals_equiv[Scheduler_IF_assms]: + "\(\s. t = idle_thread s \ tc = idle_context s) and invs and globals_equiv st\ + thread_set (tcb_arch_update (arch_tcb_context_set tc)) t + \\rv. 
globals_equiv st\" + apply (clarsimp simp: thread_set_def) + apply (wpsimp wp: set_object_wp) + apply (subgoal_tac "t \ arm_us_global_vspace (arch_state s)") + apply (clarsimp simp: idle_equiv_def globals_equiv_def tcb_at_def2 get_tcb_def idle_context_def) + apply (clarsimp split: option.splits kernel_object.splits) + apply (fastforce simp: get_tcb_def obj_at_def valid_arch_state_def + dest: valid_global_arch_objs_pt_at invs_arch_state) + done + +lemma arch_scheduler_affects_equiv_update[Scheduler_IF_assms]: + "arch_scheduler_affects_equiv st s + \ arch_scheduler_affects_equiv st (s\kheap := (kheap s)(x \ TCB y')\)" + by (clarsimp simp: arch_scheduler_affects_equiv_def) + +lemma equiv_asid_equiv_update[Scheduler_IF_assms]: + "\ get_tcb x s = Some y; equiv_asid asid st s \ + \ equiv_asid asid st (s\kheap := (kheap s)(x \ TCB y')\)" + by (clarsimp simp: equiv_asid_def obj_at_def get_tcb_def) + +end + + +requalify_consts + AARCH64.arch_globals_equiv_scheduler + AARCH64.arch_scheduler_affects_equiv + +global_interpretation Scheduler_IF_1?: + Scheduler_IF_1 arch_globals_equiv_scheduler arch_scheduler_affects_equiv +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Scheduler_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +definition swap_things where + "swap_things s t \ + t\machine_state := underlying_memory_update + (\m a. if a \ scheduler_affects_globals_frame t + then underlying_memory (machine_state s) a + else m a) + (machine_state t)\ + \cur_thread := cur_thread s\" + +lemma globals_equiv_scheduler_inv'[Scheduler_IF_assms]: + "(\st. \P and globals_equiv st\ f \\_. globals_equiv st\) + \ \P and globals_equiv_scheduler s\ f \\_. globals_equiv_scheduler s\" + apply atomize + apply (rule use_spec) + apply (simp add: spec_valid_def) + apply (erule_tac x="(swap_things sa s)" in allE) + apply (rule_tac Q'="\r st. 
globals_equiv (swap_things sa s) st" in hoare_strengthen_post) + apply (rule hoare_pre) + apply assumption + apply (clarsimp simp: globals_equiv_def swap_things_def globals_equiv_scheduler_def + arch_globals_equiv_scheduler_def arch_scheduler_affects_equiv_def)+ + done + +lemma arch_switch_to_thread_globals_equiv_scheduler[Scheduler_IF_assms]: + "\invs and globals_equiv_scheduler sta\ + arch_switch_to_thread thread + \\_. globals_equiv_scheduler sta\" + unfolding arch_switch_to_thread_def storeWord_def + apply (wpsimp wp: dmo_wp modify_wp thread_get_wp' globals_equiv_scheduler_inv'[where P="\"]) + sorry + +crunch arch_activate_idle_thread + for silc_dom_equiv[Scheduler_IF_assms, wp]: "silc_dom_equiv aag st" + and scheduler_affects_equiv[Scheduler_IF_assms, wp]: "scheduler_affects_equiv aag l st" + +lemma set_vm_root_arch_scheduler_affects_equiv[wp]: + "set_vm_root tcb \arch_scheduler_affects_equiv st\" + unfolding arch_scheduler_affects_equiv_def by wpsimp + +lemmas set_vm_root_scheduler_affects_equiv[wp] = + scheduler_affects_equiv_unobservable[OF set_vm_root_states_equiv_for + set_vm_root_cur_domain _ _ _ set_vm_root_it + set_vm_root_arch_scheduler_affects_equiv] + +lemma set_vm_root_reads_respects_scheduler[wp]: + "reads_respects_scheduler aag l \ (set_vm_root thread)" + apply (rule reads_respects_scheduler_unobservable'[OF scheduler_equiv_lift' + [OF globals_equiv_scheduler_inv']]) + apply (wp silc_dom_equiv_states_equiv_lift set_vm_root_states_equiv_for | simp)+ + sorry + +lemma store_cur_thread_fragment_midstrength_reads_respects: + "equiv_valid (scheduler_equiv aag) (midstrength_scheduler_affects_equiv aag l) + (scheduler_affects_equiv aag l) invs + (do x \ modify (cur_thread_update (\_. 
t)); + set_scheduler_action resume_cur_thread + od)" + apply (rule equiv_valid_guard_imp) + apply (rule equiv_valid_weaken_pre) + apply (rule ev_asahi_ex_to_full_fragement) + apply (auto simp: midstrength_scheduler_affects_equiv_def asahi_scheduler_affects_equiv_def + asahi_ex_scheduler_affects_equiv_def states_equiv_for_def equiv_for_def + arch_scheduler_affects_equiv_def equiv_asids_def equiv_asid_def + scheduler_globals_frame_equiv_def + simp del: split_paired_All) + done + +lemma arch_switch_to_thread_globals_equiv_scheduler': + "\invs and globals_equiv_scheduler sta\ + set_vm_root t + \\_. globals_equiv_scheduler sta\" + by (rule globals_equiv_scheduler_inv', wpsimp) + +lemma arch_switch_to_thread_reads_respects_scheduler[wp]: + "reads_respects_scheduler aag l + ((\s. pasObjectAbs aag t \ pasDomainAbs aag (cur_domain s)) and invs) + (arch_switch_to_thread t)" + apply (rule reads_respects_scheduler_cases) + apply (simp add: arch_switch_to_thread_def) + apply wp + apply (clarsimp simp: scheduler_equiv_def globals_equiv_scheduler_def) + sorry + +lemmas globals_equiv_scheduler_inv = globals_equiv_scheduler_inv'[where P="\",simplified] + +lemma arch_switch_to_thread_midstrength_reads_respects_scheduler[Scheduler_IF_assms, wp]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "midstrength_reads_respects_scheduler aag l + (invs and pas_refined aag and (\s. pasObjectAbs aag t \ pasDomainAbs aag (cur_domain s))) + (do _ <- arch_switch_to_thread t; + _ <- modify (cur_thread_update (\_. t)); + modify (scheduler_action_update (\_. resume_cur_thread)) + od)" + apply (rule equiv_valid_guard_imp) + apply (rule midstrength_reads_respects_scheduler_cases[ + where Q="(invs and pas_refined aag and + (\s. 
pasObjectAbs aag t \ pasDomainAbs aag (cur_domain s)))", + OF domains_distinct]) + sorry +(* + apply (simp add: arch_switch_to_thread_def bind_assoc) + apply (rule bind_ev_general) + apply (fold set_scheduler_action_def) + apply (rule store_cur_thread_fragment_midstrength_reads_respects) + apply (rule_tac P="\" and P'="\" in equiv_valid_inv_unobservable) + apply (rule hoare_pre) + apply (rule scheduler_equiv_lift'[where P=\]) + apply (wp globals_equiv_scheduler_inv silc_dom_lift | simp)+ + apply (wp midstrength_scheduler_affects_equiv_unobservable set_vm_root_states_equiv_for + | simp)+ + apply (wp cur_thread_update_not_subject_reads_respects_scheduler | simp | fastforce)+ + done +*) + +lemma arch_switch_to_idle_thread_globals_equiv_scheduler[Scheduler_IF_assms, wp]: + "\invs and globals_equiv_scheduler sta\ + arch_switch_to_idle_thread + \\_. globals_equiv_scheduler sta\" + unfolding arch_switch_to_idle_thread_def storeWord_def + apply (wp dmo_wp modify_wp thread_get_wp' arch_switch_to_thread_globals_equiv_scheduler') + sorry + +lemma arch_switch_to_idle_thread_unobservable[Scheduler_IF_assms]: + "\(\s. pasDomainAbs aag (cur_domain s) \ reads_scheduler aag l = {}) and + scheduler_affects_equiv aag l st and (\s. cur_domain st = cur_domain s) and invs\ + arch_switch_to_idle_thread + \\_ s. scheduler_affects_equiv aag l st s\" + apply (simp add: arch_switch_to_idle_thread_def) + apply wp + apply (clarsimp simp add: scheduler_equiv_def domain_fields_equiv_def invs_def valid_state_def) + sorry + +lemma arch_switch_to_thread_unobservable[Scheduler_IF_assms]: + "\(\s. \ reads_scheduler_cur_domain aag l s) and + scheduler_affects_equiv aag l st and (\s. cur_domain st = cur_domain s) and invs\ + arch_switch_to_thread t + \\_ s. 
scheduler_affects_equiv aag l st s\" + apply (simp add: arch_switch_to_thread_def) + apply (wp set_vm_root_scheduler_affects_equiv | simp)+ + sorry + +(* Can split, but probably more effort to generalise *) +lemma next_domain_midstrength_equiv_scheduler[Scheduler_IF_assms]: + "equiv_valid (scheduler_equiv aag) (weak_scheduler_affects_equiv aag l) + (midstrength_scheduler_affects_equiv aag l) \ next_domain" + apply (simp add: next_domain_def) + apply (subst is_extended.dxo_eq) + apply (clarsimp simp: is_extended_def is_extended'_def is_extended_axioms_def) + apply wpsimp + apply (clarsimp simp: modify_modify) + apply (rule ev_modify) + apply (clarsimp simp: equiv_for_def equiv_asid_def equiv_asids_def Let_def scheduler_equiv_def + globals_equiv_scheduler_def silc_dom_equiv_def domain_fields_equiv_def + weak_scheduler_affects_equiv_def midstrength_scheduler_affects_equiv_def + states_equiv_for_def idle_equiv_def) + done + +lemma resetTimer_irq_state[wp]: + "resetTimer \\s. P (irq_state s)\" + apply (simp add: resetTimer_def machine_op_lift_def machine_rest_lift_def) + apply (wp | wpc| simp)+ + done + +lemma dmo_resetTimer_reads_respects_scheduler[Scheduler_IF_assms]: + "reads_respects_scheduler aag l \ (do_machine_op resetTimer)" + apply (rule reads_respects_scheduler_unobservable) + apply (rule scheduler_equiv_lift) + apply (simp add: globals_equiv_scheduler_def[abs_def] idle_equiv_def) + apply (wpsimp wp: dmo_wp) + apply ((wp silc_dom_lift dmo_wp | simp)+)[5] + apply (rule scheduler_affects_equiv_unobservable) + apply (simp add: states_equiv_for_def[abs_def] equiv_for_def equiv_asids_def equiv_asid_def) + apply (rule hoare_pre) + apply (wp | simp add: arch_scheduler_affects_equiv_def | wp dmo_wp)+ + done + +lemma ackInterrupt_reads_respects_scheduler[Scheduler_IF_assms]: + "reads_respects_scheduler aag l \ (do_machine_op (ackInterrupt irq))" + apply (rule reads_respects_scheduler_unobservable) + apply (rule scheduler_equiv_lift) + apply (simp add: 
globals_equiv_scheduler_def[abs_def] idle_equiv_def) + apply (rule hoare_pre) + apply wps + apply (wp dmo_wp ackInterrupt_irq_masks | simp add:no_irq_def)+ + apply clarsimp + apply ((wp silc_dom_lift dmo_wp | simp)+)[5] + apply (rule scheduler_affects_equiv_unobservable) + apply (simp add: states_equiv_for_def[abs_def] equiv_for_def equiv_asids_def equiv_asid_def) + apply (rule hoare_pre) + apply wps + apply (wp dmo_wp | simp add: arch_scheduler_affects_equiv_def ackInterrupt_def)+ + done + +lemma thread_set_scheduler_affects_equiv[Scheduler_IF_assms, wp]: + "\(\s. x \ idle_thread s \ pasObjectAbs aag x \ reads_scheduler aag l) and + (\s. x = idle_thread s \ tc = idle_context s) and scheduler_affects_equiv aag l st\ + thread_set (tcb_arch_update (arch_tcb_context_set tc)) x + \\_. scheduler_affects_equiv aag l st\" + apply (simp add: thread_set_def) + apply (wp set_object_wp) + apply (intro impI conjI) + apply (case_tac "x \ idle_thread s",simp_all) + apply (clarsimp simp: scheduler_affects_equiv_def get_tcb_def scheduler_globals_frame_equiv_def + split: option.splits kernel_object.splits) + apply (clarsimp simp: arch_scheduler_affects_equiv_def) + apply (elim states_equiv_forE equiv_forE) + apply (rule states_equiv_forI,simp_all add: equiv_for_def equiv_asids_def equiv_asid_def) + apply (clarsimp simp: obj_at_def) + apply (clarsimp simp: idle_context_def get_tcb_def + split: option.splits kernel_object.splits) + apply (subst arch_tcb_update_aux) + apply simp + apply (subgoal_tac "s = (s\kheap := (kheap s)(idle_thread s \ TCB y)\)", simp) + apply (rule state.equality) + apply (rule ext) + apply simp+ + done + +lemma set_object_reads_respects_scheduler[Scheduler_IF_assms, wp]: + "reads_respects_scheduler aag l \ (set_object ptr obj)" + unfolding equiv_valid_def2 equiv_valid_2_def + by (auto simp: set_object_def bind_def get_def put_def return_def get_object_def assert_def + fail_def gets_def scheduler_equiv_def domain_fields_equiv_def equiv_for_def + 
globals_equiv_scheduler_def arch_globals_equiv_scheduler_def silc_dom_equiv_def + scheduler_affects_equiv_def arch_scheduler_affects_equiv_def + scheduler_globals_frame_equiv_def identical_kheap_updates_def + intro: states_equiv_for_identical_kheap_updates idle_equiv_identical_kheap_updates) + +lemma arch_activate_idle_thread_reads_respects_scheduler[Scheduler_IF_assms, wp]: + "reads_respects_scheduler aag l \ (arch_activate_idle_thread rv)" + unfolding arch_activate_idle_thread_def by wpsimp + +lemma arch_prepare_next_domain_ev[Scheduler_IF_assms]: + "equiv_valid_inv I A (\_. True) arch_prepare_next_domain" + unfolding arch_prepare_next_domain_def + apply wp + sorry + +lemma arch_prepare_next_domain_globals_equiv_scheduler[Scheduler_IF_assms]: + "arch_prepare_next_domain \globals_equiv_scheduler st\" + sorry + +end + + +global_interpretation Scheduler_IF_2?: + Scheduler_IF_2 arch_globals_equiv_scheduler arch_scheduler_affects_equiv +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Scheduler_IF_assms)?) +qed + + +hide_fact Scheduler_IF_2.globals_equiv_scheduler_inv' +requalify_facts AARCH64.globals_equiv_scheduler_inv' + +end diff --git a/proof/infoflow/AARCH64/ArchSyscall_IF.thy b/proof/infoflow/AARCH64/ArchSyscall_IF.thy new file mode 100644 index 0000000000..7eeb624a42 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchSyscall_IF.thy @@ -0,0 +1,250 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchSyscall_IF +imports Syscall_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Syscall_IF_assms + +lemma globals_equiv_irq_state_update[Syscall_IF_assms, simp]: + "globals_equiv st (s\machine_state := + machine_state s \irq_state := f (irq_state (machine_state s))\\) = + globals_equiv st s" + by (auto simp: globals_equiv_def idle_equiv_def) + +lemma thread_set_globals_equiv'[Syscall_IF_assms]: + "\globals_equiv s and valid_arch_state and (\s. 
tptr \ idle_thread s)\ + thread_set f tptr + \\_. globals_equiv s\" + unfolding thread_set_def + apply (wp set_object_globals_equiv) + apply simp + apply (fastforce simp: obj_at_def get_tcb_def valid_arch_state_def + dest: valid_global_arch_objs_pt_at) + done + +lemma sts_authorised_for_globals_inv[Syscall_IF_assms]: + "set_thread_state d f \authorised_for_globals_inv oper\" + unfolding authorised_for_globals_inv_def authorised_for_globals_arch_inv_def + authorised_for_globals_page_table_inv_def authorised_for_globals_page_inv_def + apply (case_tac oper) + apply (wp | simp)+ + apply (rename_tac arch_invocation) + apply (case_tac arch_invocation) + apply simp + apply (rename_tac page_table_invocation) + apply (case_tac page_table_invocation) + apply wpsimp+ + apply (rename_tac page_invocation) + apply (case_tac page_invocation) + apply (simp | wp hoare_vcg_ex_lift)+ + done + +lemma dmo_maskInterrupt_globals_equiv[Syscall_IF_assms, wp]: + "do_machine_op (maskInterrupt b irq) \globals_equiv s\" + unfolding maskInterrupt_def + apply (rule dmo_no_mem_globals_equiv) + apply (wp modify_wp | simp)+ + done + +lemma dmo_ackInterrupt_globals_equiv[Syscall_IF_assms, wp]: + "do_machine_op (ackInterrupt irq) \globals_equiv s\" + unfolding ackInterrupt_def by wpsimp + +lemma dmo_resetTimer_globals_equiv[Syscall_IF_assms, wp]: + "do_machine_op resetTimer \globals_equiv s\" + unfolding resetTimer_def by (rule dmo_mol_globals_equiv) + +lemma arch_mask_irq_signal_globals_equiv[Syscall_IF_assms, wp]: + "arch_mask_irq_signal irq \globals_equiv st\" + by wpsimp + +lemma handle_reserved_irq_globals_equiv[Syscall_IF_assms, wp]: + "handle_reserved_irq irq \globals_equiv st\" + unfolding handle_reserved_irq_def + sorry + +lemma handle_vm_fault_reads_respects[Syscall_IF_assms]: + "reads_respects aag l (K (is_subject aag thread)) (handle_vm_fault thread vmfault_type)" + unfolding handle_vm_fault_def + sorry + +lemma handle_hypervisor_fault_reads_respects[Syscall_IF_assms]: + "reads_respects 
aag l \ (handle_hypervisor_fault thread hypfault_type)" + apply (cases hypfault_type; wpsimp split_del: if_split) + sorry + +lemma handle_vm_fault_globals_equiv[Syscall_IF_assms]: + "\globals_equiv st and valid_arch_state and (\s. thread \ idle_thread s)\ + handle_vm_fault thread vmfault_type + \\r. globals_equiv st\" + unfolding handle_vm_fault_def + by (cases vmfault_type; wpsimp wp: dmo_no_mem_globals_equiv) + +lemma handle_hypervisor_fault_globals_equiv[Syscall_IF_assms]: + "handle_hypervisor_fault thread hypfault_type \globals_equiv st\" + apply (cases hypfault_type; wpsimp split_del: if_split) + sorry + +crunch arch_activate_idle_thread, handle_spurious_irq + for globals_equiv[Syscall_IF_assms, wp]: "globals_equiv st" + +lemma select_f_setNextPC_reads_respects[Syscall_IF_assms, wp]: + "reads_respects aag l \ (select_f (setNextPC a b))" + unfolding setNextPC_def setRegister_def + by (wpsimp simp: select_f_returns) + +lemma select_f_getRestartPC_reads_respects[Syscall_IF_assms, wp]: + "reads_respects aag l \ (select_f (getRestartPC a))" + unfolding getRestartPC_def getRegister_def + by (wpsimp simp: select_f_returns) + +lemma arch_activate_idle_thread_reads_respects[Syscall_IF_assms, wp]: + "reads_respects aag l \ (arch_activate_idle_thread t)" + unfolding arch_activate_idle_thread_def by wpsimp + +lemma decode_asid_pool_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s)\ + decode_asid_pool_invocation label msg slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding authorised_for_globals_arch_inv_def decode_asid_pool_invocation_def + apply (simp add: split_def Let_def cong: if_cong split del: if_split) + apply wpsimp + done + +lemma decode_asid_control_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. 
cte_wp_at ((=) cap) slot s)\ + decode_asid_control_invocation label msg slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding authorised_for_globals_arch_inv_def decode_asid_control_invocation_def + apply (simp add: split_def Let_def cong: if_cong split del: if_split) + apply wpsimp + done + +lemma decode_frame_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s)\ + decode_frame_invocation label msg slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding authorised_for_globals_arch_inv_def authorised_for_globals_page_inv_def + decode_frame_invocation_def decode_fr_inv_map_def + apply (simp add: split_def Let_def cong: arch_cap.case_cong if_cong split del: if_split) + apply (wpsimp wp: check_vp_wpR) + sorry +(* + apply (subgoal_tac + "(\a b. cte_wp_at (parent_for_refs (make_user_pte (addrFromPPtr x) + (attribs_from_word (msg ! 2)) + (mask_vm_rights xa + (data_to_rights (msg ! Suc 0))), + ba)) (a, b) s)", clarsimp) + apply (clarsimp simp: parent_for_refs_def cte_wp_at_caps_of_state) + apply (frule vspace_for_asid_vs_lookup) + apply (frule_tac vptr="msg ! 0" in pt_lookup_slot_cap_to) + apply fastforce + apply (fastforce elim: vs_lookup_table_is_aligned) + apply (drule not_le_imp_less) + apply (frule order.strict_implies_order[where b=user_vtop]) + apply (drule order.strict_trans[OF _ user_vtop_pptr_base]) + apply (drule canonical_below_pptr_base_user) + apply (erule below_user_vtop_canonical) + apply (clarsimp simp: vmsz_aligned_def) + apply (drule is_aligned_no_overflow_mask) + apply (clarsimp simp: user_region_def) + apply (erule (1) dual_order.trans) + apply assumption + apply (fastforce simp: is_pt_cap_def is_PageTableCap_def split: option.splits) + done +*) + +lemma decode_page_table_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. 
cte_wp_at ((=) cap) slot s)\ + decode_page_table_invocation label msg slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding authorised_for_globals_arch_inv_def authorised_for_globals_page_table_inv_def + decode_page_table_invocation_def decode_pt_inv_map_def + apply (simp add: split_def Let_def cong: arch_cap.case_cong if_cong split del: if_split) + apply (wpsimp cong: if_cong wp: hoare_vcg_if_lift2) + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_lookup_slot_def) + apply (frule (1) pt_lookup_vs_lookupI, clarsimp) + apply (drule vs_lookup_level) + apply (frule pt_walk_max_level) + sorry +(* + apply (subgoal_tac "msg ! 0 \ user_region") + apply (frule reachable_page_table_not_global; clarsimp?) + apply (frule vs_lookup_table_is_aligned; clarsimp?) + apply (fastforce dest: global_pt_in_global_refs invs_arch_state simp: valid_arch_state_def) + apply (drule not_le_imp_less) + apply (frule order.strict_implies_order[where b=user_vtop]) + apply (drule order.strict_trans[OF _ user_vtop_pptr_base]) + apply (drule canonical_below_pptr_base_user) + apply (erule below_user_vtop_canonical) + apply clarsimp + done +*) + +lemma decode_sgi_signal_invocation_authorised_for_globals: + "\\\ decode_sgi_signal_invocation cap + \authorised_for_globals_arch_inv\, -" + unfolding decode_sgi_signal_invocation_def authorised_for_globals_arch_inv_def + by wpsimp + +lemma decode_vspace_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s)\ + decode_vspace_invocation label msg slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding decode_vspace_invocation_def + sorry + +lemma decode_vcpu_invocation_authorised_for_globals: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. 
cte_wp_at ((=) cap) slot s)\ + decode_vcpu_invocation label msg cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding decode_vcpu_invocation_def + sorry + +lemma decode_arch_invocation_authorised_for_globals[Syscall_IF_assms]: + "\invs and cte_wp_at ((=) (ArchObjectCap cap)) slot + and (\s. \(cap, slot) \ set excaps. cte_wp_at ((=) cap) slot s)\ + arch_decode_invocation label msg x_slot slot cap excaps + \authorised_for_globals_arch_inv\, -" + unfolding arch_decode_invocation_def + by (wpsimp wp: decode_sgi_signal_invocation_authorised_for_globals + decode_vspace_invocation_authorised_for_globals + decode_vcpu_invocation_authorised_for_globals + decode_asid_pool_invocation_authorised_for_globals + decode_asid_control_invocation_authorised_for_globals + decode_frame_invocation_authorised_for_globals + decode_page_table_invocation_authorised_for_globals, fastforce) + +lemma arch_prepare_set_domain_globals_equiv[Syscall_IF_assms]: + "arch_prepare_set_domain t new_dom \globals_equiv st\" + sorry + +lemma arch_prepare_set_domain_valid_arch_state[Syscall_IF_assms]: + "arch_prepare_set_domain t new_dom \valid_arch_state\" + sorry + +end + + +global_interpretation Syscall_IF_1?: Syscall_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Syscall_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchTcb_IF.thy b/proof/infoflow/AARCH64/ArchTcb_IF.thy new file mode 100644 index 0000000000..1ebeebe795 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchTcb_IF.thy @@ -0,0 +1,450 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchTcb_IF +imports Tcb_IF +begin + +context Arch begin global_naming AARCH64 + +named_theorems Tcb_IF_assms + +crunch set_irq_state, suspend + for arm_us_global_vspace[wp]: "\s. 
P (arm_us_global_vspace (arch_state s))" + (wp: mapM_x_wp select_inv hoare_vcg_if_lift2 hoare_drop_imps dxo_wp_weak + simp: unless_def + ignore: empty_slot_ext reschedule_required) + +crunch as_user, restart + for arm_us_global_vspace[wp]: "\s. P (arm_us_global_vspace (arch_state s))" (wp: dxo_wp_weak) + +lemma cap_ne_global_pt: + "\ ex_nonz_cap_to word s; valid_global_refs s; valid_global_arch_objs s \ + \ word \ arm_us_global_vspace (arch_state s)" + unfolding ex_nonz_cap_to_def + apply (simp only: cte_wp_at_caps_of_state zobj_refs_to_obj_refs) + apply (elim exE conjE) + apply (drule valid_global_refsD2,simp) + apply (unfold global_refs_def) + apply clarsimp + apply (unfold cap_range_def) + apply blast + done + +lemma valid_arch_caps_vs_lookup[Tcb_IF_assms]: + "valid_arch_caps s \ valid_vs_lookup s" + by (simp add: valid_arch_caps_def) + +lemma no_cap_to_idle_thread'[Tcb_IF_assms]: + "valid_global_refs s \ \ ex_nonz_cap_to (idle_thread s) s" + apply (clarsimp simp add: ex_nonz_cap_to_def valid_global_refs_def valid_refs_def) + apply (drule_tac x=a in spec) + apply (drule_tac x=b in spec) + apply (clarsimp simp: cte_wp_at_def global_refs_def cap_range_def) + apply (case_tac cap,simp_all) + done + +lemma no_cap_to_idle_thread''[Tcb_IF_assms]: + "valid_global_refs s \ caps_of_state s ref \ Some (ThreadCap (idle_thread s))" + apply (clarsimp simp add: valid_global_refs_def valid_refs_def cte_wp_at_caps_of_state) + apply (drule_tac x="fst ref" in spec) + apply (drule_tac x="snd ref" in spec) + apply (simp add: cap_range_def global_refs_def) + done + +crunch arch_post_modify_registers + for globals_equiv[Tcb_IF_assms, wp]: "globals_equiv st" + and valid_arch_state[Tcb_IF_assms, wp]: valid_arch_state + +lemma arch_post_modify_registers_reads_respects_f[Tcb_IF_assms, wp]: + "reads_respects_f aag l \ (arch_post_modify_registers cur t)" + by wpsimp + +(* FIXME AARCH64 IF: consolidate with reads_equiv_valid_rv_inv_f *) +lemma reads_equiv_valid_inv_f: + assumes a: 
"reads_equiv_valid_inv A aag P f" + assumes b: "\P. \P\ f \\_. P\" + shows "equiv_valid_inv (reads_equiv_f aag) A P f" + using reads_equiv_valid_rv_inv_f equiv_valid_def2 a b by blast + +lemma arch_get_sanitise_register_info_reads_respects_f[Tcb_IF_assms, wp]: + "reads_respects_f aag l (K (aag_can_read_or_affect aag l rv)) (arch_get_sanitise_register_info rv)" + unfolding arch_get_sanitise_register_info_def + by (wpsimp wp: reads_equiv_valid_inv_f) + +end + + +global_interpretation Tcb_IF_1?: Tcb_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Tcb_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma valid_ipc_buffer_cap_is_nondevice_page_cap: + "\valid_ipc_buffer_cap cap ptr; cap \ NullCap\ \ is_nondevice_page_cap cap" + by (clarsimp simp: valid_ipc_buffer_cap_def split: cap.splits arch_cap.splits) + +lemma is_valid_vtable_root_def2: + "is_valid_vtable_root c = (\r a. c = ArchObjectCap (PageTableCap r VSRootPT_T (Some a)))" + by (auto simp: is_valid_vtable_root_def split: cap.splits arch_cap.splits option.splits pt_type.splits) + +(* FIXME: Pretty general. Probably belongs somewhere else *) +lemma invoke_tcb_thread_preservation[Tcb_IF_assms]: + notes is_nondevice_page_cap_simps[simp del] + assumes cap_delete_P: "\slot. \invs and P and emptyable slot\ cap_delete slot \\_. P\" + assumes cap_insert_P: "\new_cap src dest. \invs and P\ cap_insert new_cap src dest \\_. P\" + assumes thread_set_P: "\f ptr. \invs and P\ thread_set (tcb_ipc_buffer_update f) ptr \\_. P\" + assumes thread_set_P': "\f ptr. \invs and P\ thread_set (tcb_fault_handler_update f) ptr \\_. P\" + assumes set_mcpriority_P: "\mcp ptr. \invs and P\ set_mcpriority ptr mcp \\_.P\" + assumes set_priority_P: "\prio ptr. \invs and P\ set_priority ptr prio \\_.P\" + assumes reschedule_required_P: "reschedule_required \P\" + assumes P_trans[simp]: "\f s. 
P (trans_state f s) = P s" + shows + "\P and invs and tcb_inv_wf (tcb_invocation.ThreadControl t sl ep mcp prio croot vroot buf)\ + invoke_tcb (tcb_invocation.ThreadControl t sl ep mcp prio croot vroot buf) + \\rv s :: det_state. P s\" (is "\?pre\ _ \_\") + apply (simp add: split_def cong: option.case_cong) + apply (rule hoare_weaken_pre) + apply (rule_tac P="case ep of Some v \ length v = word_bits | _ \ True" + in hoare_gen_asm) + apply wp + apply (wpsimp wp: set_priority_P) + apply (rule_tac Q'="\_. invs and P" and E'="\_. P" in hoare_post_impE; clarsimp) + apply ((simp add: conj_comms(1, 2) + | rule wp_split_const_if wp_split_const_if_R hoare_vcg_all_liftE_R + hoare_vcg_conj_elimE hoare_vcg_const_imp_liftE_R hoare_vcg_conj_liftE_R + | (wp check_cap_inv2[where Q="\_ s. t \ idle_thread s"] + out_invs_trivial case_option_wpE cap_delete_deletes + cap_delete_valid_cap cap_insert_valid_cap out_cte_at + cap_insert_cte_at cap_delete_cte_at out_valid_cap out_tcb_valid + hoare_vcg_const_imp_liftE_R hoare_vcg_all_liftE_R + thread_set_tcb_ipc_buffer_cap_cleared_invs + thread_set_invs_trivial[OF ball_tcb_cap_casesI] + hoare_vcg_all_lift thread_set_valid_cap out_emptyable + check_cap_inv [where P="valid_cap c" for c] + check_cap_inv [where P="tcb_cap_valid c p" for c p] + check_cap_inv[where P="cte_at p0" for p0] + check_cap_inv[where P="tcb_at p0" for p0] + thread_set_cte_at thread_set_no_cap_to_trivial[OF ball_tcb_cap_casesI] + checked_insert_no_cap_to + thread_set_cte_wp_at_trivial[where Q="\x. x", OF ball_tcb_cap_casesI] + out_no_cap_to_trivial[OF ball_tcb_cap_casesI] thread_set_ipc_tcb_cap_valid + check_cap_inv2[where Q="\_. 
P"] cap_delete_P cap_insert_P + thread_set_P thread_set_P' set_mcpriority_P set_mcpriority_idle_thread + reschedule_required_P dxo_wp_weak hoare_weak_lift_imp) + | simp add: ran_tcb_cap_cases dom_tcb_cap_cases[simplified] emptyable_def + | wpc + | strengthen use_no_cap_to_obj_asid_strg[simplified conj_comms] + tcb_cap_always_valid_strg[where p="tcb_cnode_index 0"] + tcb_cap_always_valid_strg[where p="tcb_cnode_index (Suc 0)"])+) (*slow*) + apply (unfold option_update_thread_def) + apply (wp itr_wps thread_set_P thread_set_P' + | simp add: emptyable_def | wpc)+ (*also slow*) + apply clarsimp + by (clarsimp simp: tcb_at_cte_at_0 tcb_at_cte_at_1[simplified] + is_cap_simps is_valid_vtable_root_def2 + is_cnode_or_valid_arch_def tcb_cap_valid_def + tcb_at_st_tcb_at[symmetric] invs_valid_objs + cap_asid_def vs_cap_ref_def + clas_no_asid cli_no_irqs no_cap_to_idle_thread + valid_ipc_buffer_cap_is_nondevice_page_cap + split: option.split_asm) + +lemma tc_reads_respects_f[Tcb_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + and tc[simp]: "ti = ThreadControl x41 x42 x43 x44 x45 x46 x47 x48" + notes validE_valid[wp del] hoare_weak_lift_imp [wp] + shows + "reads_respects_f aag l + (silc_inv aag st and only_timer_irq_inv irq st' and einvs and simple_sched_action + and pas_refined aag and pas_cur_domain aag and tcb_inv_wf ti + and is_subject aag \ cur_thread + and K (authorised_tcb_inv aag ti \ authorised_tcb_inv_extra aag ti)) + (invoke_tcb ti)" + apply (simp add: split_def cong: option.case_cong) + apply (wpsimp wp: set_priority_reads_respects[THEN reads_respects_f[where st=st and Q=\]]) + apply (wpsimp wp: hoare_vcg_const_imp_liftE_R simp: when_def | wpc)+ + apply (rule conjI) + apply ((wpsimp wp: reschedule_required_reads_respects_f)+)[4] + apply ((wp reads_respects_f[OF cap_insert_reads_respects, where st=st] + reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] + set_priority_reads_respects[THEN + reads_respects_f[where aag=aag and 
st=st and Q=\]] + set_mcpriority_reads_respects[THEN + reads_respects_f[where aag=aag and st=st and Q=\]] + check_cap_inv[OF check_cap_inv[OF cap_insert_valid_list]] + check_cap_inv[OF check_cap_inv[OF cap_insert_valid_sched]] + check_cap_inv[OF check_cap_inv[OF cap_insert_schedact]] + check_cap_inv[OF check_cap_inv[OF cap_insert_cur_domain]] + check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] + get_thread_state_rev[THEN + reads_respects_f[where aag=aag and st=st and Q=\]] + hoare_vcg_all_liftE_R hoare_vcg_all_lift + cap_delete_reads_respects[where st=st] checked_insert_pas_refined + thread_set_pas_refined + reads_respects_f[OF checked_insert_reads_respects, where st=st] + checked_cap_insert_silc_inv[where st=st] + cap_delete_silc_inv_not_transferable[where st=st] + checked_cap_insert_only_timer_irq_inv[where st=st' and irq=irq] + cap_delete_only_timer_irq_inv[where st=st' and irq=irq] + set_priority_only_timer_irq_inv[where st=st' and irq=irq] + set_mcpriority_only_timer_irq_inv[where st=st' and irq=irq] + cap_delete_deletes cap_delete_valid_cap cap_delete_cte_at + cap_delete_pas_refined' itr_wps(12) itr_wps(14) cap_insert_cte_at + checked_insert_no_cap_to hoare_vcg_const_imp_liftE_R hoare_vcg_conj_lift + as_user_reads_respects_f thread_set_mdb cap_delete_invs + thread_set_valid_arch_state + | wpc + | simp add: emptyable_def tcb_cap_cases_def tcb_cap_valid_def + tcb_at_st_tcb_at when_def + | strengthen use_no_cap_to_obj_asid_strg invs_mdb + | solves auto)+)[7] + apply ((simp add: conj_comms, strengthen imp_consequent[where Q="x = None" for x] + , simp cong: conj_cong) + | wp reads_respects_f[OF cap_insert_reads_respects, where st=st] + reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] + set_priority_reads_respects[THEN reads_respects_f[where st=st and Q=\]] + set_mcpriority_reads_respects[THEN reads_respects_f[where st=st and Q=\]] + check_cap_inv[OF check_cap_inv[OF cap_insert_valid_list]] + check_cap_inv[OF check_cap_inv[OF 
cap_insert_valid_sched]] + check_cap_inv[OF check_cap_inv[OF cap_insert_schedact]] + check_cap_inv[OF check_cap_inv[OF cap_insert_cur_domain]] + check_cap_inv[OF check_cap_inv[OF cap_insert_ct]] + get_thread_state_rev[THEN reads_respects_f[where st=st and Q=\]] + hoare_vcg_all_liftE_R hoare_vcg_all_lift + cap_delete_reads_respects[where st=st] checked_insert_pas_refined + thread_set_pas_refined reads_respects_f[OF checked_insert_reads_respects] + checked_cap_insert_silc_inv[where st=st] + cap_delete_silc_inv_not_transferable[where st=st] + checked_cap_insert_only_timer_irq_inv[where st=st' and irq=irq] + cap_delete_only_timer_irq_inv[where st=st' and irq=irq] + set_priority_only_timer_irq_inv[where st=st' and irq=irq] + set_mcpriority_only_timer_irq_inv[where st=st' and irq=irq] + cap_delete_deletes cap_delete_valid_cap cap_delete_cte_at + cap_delete_pas_refined' itr_wps(12) itr_wps(14) cap_insert_cte_at + checked_insert_no_cap_to hoare_vcg_const_imp_liftE_R + as_user_reads_respects_f cap_delete_invs + | wpc + | simp add: emptyable_def tcb_cap_cases_def tcb_cap_valid_def when_def st_tcb_at_triv + | strengthen use_no_cap_to_obj_asid_strg invs_mdb + | wp (once) hoare_drop_imp)+ + apply (simp add: option_update_thread_def tcb_cap_cases_def + | wp hoare_weak_lift_imp hoare_weak_lift_imp_conj thread_set_pas_refined + reads_respects_f[OF thread_set_reads_respects, where st=st and Q="\"] + thread_set_valid_arch_state + | wpc)+ + apply (wp hoare_vcg_all_lift thread_set_tcb_fault_handler_update_invs + thread_set_tcb_fault_handler_update_silc_inv + thread_set_not_state_valid_sched + thread_set_pas_refined thread_set_emptyable thread_set_valid_cap + thread_set_cte_at thread_set_no_cap_to_trivial + thread_set_tcb_fault_handler_update_only_timer_irq_inv + thread_set_valid_arch_state + | simp add: tcb_cap_cases_def | wpc | wp (once) hoare_drop_imp)+ + apply (clarsimp simp: authorised_tcb_inv_def authorised_tcb_inv_extra_def emptyable_def) + apply (clarsimp cong: conj_cong) + 
apply (intro conjI impI allI) + (* slow *) + by (clarsimp simp: is_cap_simps is_cnode_or_valid_arch_def is_valid_vtable_root_def + det_setRegister option.disc_eq_case[symmetric] + split: cap.splits arch_cap.splits option.split pt_type.splits)+ + +lemma set_arm_current_fpu_owner_None_reads_respects: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l \ (set_arm_current_fpu_owner None)" + unfolding set_arm_current_fpu_owner_def maybeM_def arch_thread_set_is_thread_set + apply clarsimp + apply (wpsimp wp: modify_ev thread_set_reads_respects) + apply auto + subgoal sorry (* FIXME AARCH64 IF: requires equiv fpu arch state *) + by (clarsimp simp: reads_equiv_def affects_equiv_def states_equiv_for_def + equiv_for_def equiv_asids_def equiv_asid_def silc_dom_equiv_def)+ + +lemma dmo_disableFpu_reads_respects[wp]: + "reads_respects aag l \ (do_machine_op disableFpu)" + unfolding disableFpu_def + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects) + apply wp + apply (simp add: equiv_valid_def2) + apply (rule modify_ev2) + apply (fastforce simp: equiv_for_def) + apply (rule machine_op_lift_ev') + apply (rule hoare_TrueI) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: equiv_for_def) + apply assumption + apply wpsimp + done + +lemma dmo_enableFpu_reads_respects[wp]: + "reads_respects aag l \ (do_machine_op enableFpu)" + unfolding enableFpu_def + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects) + apply wp + apply (simp add: equiv_valid_def2) + apply (rule modify_ev2) + apply (fastforce simp: equiv_for_def) + apply (rule machine_op_lift_ev') + apply (rule hoare_TrueI) + apply (rule conjI) + apply clarsimp + apply (clarsimp simp: equiv_for_def) + apply assumption + apply wpsimp + done + +lemma equiv_valid_state_assert: + "equiv_valid_inv I A P f + \ equiv_valid_inv I A P (do state_assert g; f od)" + unfolding state_assert_def bind_assoc K_bind_def + by (erule equiv_valid_get_assert) + 
+lemma dmo_readFpuState_reads_respects[wp]: + "reads_respects aag l \ (do_machine_op readFpuState)" + unfolding readFpuState_def + apply (rule use_spec_ev) + apply (rule do_machine_op_spec_reads_respects) + apply (rule equiv_valid_state_assert) + apply (rule equiv_valid_guard_imp) + apply wpsimp + subgoal sorry (* FIXME AARCH64 IF: requires equiv fpu state *) + apply wpsimp + done + +lemma save_fpu_state_reads_respects[wp]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l (K (aag_can_read_or_affect aag l t)) (save_fpu_state t)" + unfolding save_fpu_state_def + by (wpsimp wp: as_user_reads_respects simp: setFPUState_def) + +lemma switch_local_fpu_owner_None_reads_respects: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l \ (switch_local_fpu_owner None)" + unfolding switch_local_fpu_owner_def maybeM_def + apply (wpsimp wp: set_arm_current_fpu_owner_None_reads_respects) + sorry (* FIXME AARCH64 IF: requires equiv fpu arch state *) + +lemma fpu_release_reads_respects: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects aag l \ (fpu_release t)" + unfolding fpu_release_def + apply (wpsimp wp: switch_local_fpu_owner_None_reads_respects when_ev) + sorry (* FIXME AARCH64 IF: requires equiv fpu arch state *) + +lemma arch_post_set_flags_reads_respects_f[Tcb_IF_assms]: + assumes domains_distinct[wp]: "pas_domains_distinct aag" + shows "reads_respects_f aag l (silc_inv aag st) (arch_post_set_flags t flags)" + unfolding arch_post_set_flags_def + apply (rule equiv_valid_guard_imp) + apply (wpsimp wp: when_ev reads_respects_f[OF fpu_release_reads_respects]) + apply fastforce + done + +lemma arch_tcb_context_get_cur_fpu_update[simp]: + "arch_tcb_context_get (tcb_arch tcb\tcb_cur_fpu := fpu\) = arch_tcb_context_get (tcb_arch tcb)" + by (simp add: arch_tcb_context_get_def) + +lemma globals_equiv_fpu_owner_update[simp]: + "globals_equiv st (s\arch_state := arch_state 
s\arm_current_fpu_owner := t\\) = + globals_equiv st s" + by (auto simp add: globals_equiv_def idle_equiv_def) + +lemma set_arm_current_fpu_owner_globals_equiv[wp]: + "\globals_equiv st and valid_arch_state\ + set_arm_current_fpu_owner t + \\_. globals_equiv st\" + unfolding set_arm_current_fpu_owner_def arch_thread_set_is_thread_set + by (wpsimp wp: thread_set_globals_equiv thread_set_valid_arch_state hoare_vcg_all_lift hoare_drop_imps + | fastforce simp: ran_tcb_cap_cases)+ + +lemma dmo_enableFpu_globals_equiv[wp]: + "do_machine_op enableFpu \globals_equiv st\" + apply (clarsimp simp: enableFpu_def dmo_distr dmo_modify_distr) + apply (rule bind_wp_skip) + apply (unfold globals_equiv_def arch_globals_equiv_def idle_equiv_def enableFpu_def)[1] + apply wpsimp + apply wpsimp + done + +lemma dmo_disableFpu_globals_equiv[wp]: + "do_machine_op disableFpu \globals_equiv st\" + apply (clarsimp simp: disableFpu_def dmo_distr dmo_modify_distr) + apply (rule bind_wp_skip) + apply (unfold globals_equiv_def arch_globals_equiv_def idle_equiv_def enableFpu_def)[1] + apply wpsimp + apply wpsimp + done + +lemma dmo_state_assert_inv[wp]: + "do_machine_op (state_assert f) \P\" + unfolding state_assert_def + by (wpsimp wp: dmo_wp) + +lemma dmo_readFpuState_globals_equiv[wp]: + "do_machine_op readFpuState \globals_equiv st\" + by (wpsimp simp: readFpuState_def dmo_distr dmo_modify_distr) + +lemma dmo_writeFpuState_globals_equiv[wp]: + "do_machine_op (writeFpuState fpustate) \globals_equiv st\" + apply (clarsimp simp: writeFpuState_def dmo_distr dmo_modify_distr) + apply wpsimp + apply (clarsimp simp: globals_equiv_def idle_equiv_def) + done + +lemma as_user_getRestart_inv[wp]: + "as_user t getFPUState \P\" + unfolding getFPUState_def + by (wpsimp wp: as_user_inv) + +lemma load_fpu_state_globals_equiv[wp]: + "load_fpu_state t \globals_equiv st\" + unfolding load_fpu_state_def + by wpsimp + +lemma save_fpu_state_globals_equiv[wp]: + "\globals_equiv st and valid_arch_state and (\s. 
t \ idle_thread s)\ + save_fpu_state t + \\_. globals_equiv st\" + unfolding save_fpu_state_def + by wpsimp + +crunch load_fpu_state, save_fpu_state + for valid_arch_state[wp]: "valid_arch_state" + +lemma switch_local_fpu_owner_globals_equiv: + "\globals_equiv st and invs\ + switch_local_fpu_owner new_owner + \\_. globals_equiv st\" + unfolding switch_local_fpu_owner_def + apply (wpsimp wp: hoare_vcg_all_lift hoare_weak_lift_imp) + apply (fastforce simp: invs_def valid_state_def valid_pspace_def valid_cur_fpu_def + is_tcb_cur_fpu_def live_def arch_tcb_live_def + dest: idle_no_ex_cap if_live_then_nonz_capD) + done + +crunch arch_post_set_flags + for globals_equiv[Tcb_IF_assms]: "globals_equiv st" + (simp: crunch_simps) + +end + + +global_interpretation Tcb_IF_2?: Tcb_IF_2 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact Tcb_IF_assms)?) +qed + +end diff --git a/proof/infoflow/AARCH64/ArchUserOp_IF.thy b/proof/infoflow/AARCH64/ArchUserOp_IF.thy new file mode 100644 index 0000000000..7eb3f11d46 --- /dev/null +++ b/proof/infoflow/AARCH64/ArchUserOp_IF.thy @@ -0,0 +1,855 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchUserOp_IF +imports UserOp_IF +begin + +context Arch begin global_naming AARCH64 + +definition ptable_lift_s where + "ptable_lift_s s \ ptable_lift (cur_thread s) s" + +definition ptable_rights_s where + "ptable_rights_s s \ ptable_rights (cur_thread s) s" + +(* FIXME: move to ADT_AI.thy *) +definition ptable_attrs :: "obj_ref \ 's :: state_ext state \ obj_ref \ vm_attributes" where + "ptable_attrs tcb s \ + \addr. case_option {} (fst o snd o snd) + (get_page_info (aobjs_of s) (get_vspace_of_thread (kheap s) (arch_state s) tcb) addr)" + +definition ptable_attrs_s :: "'s :: state_ext state \ obj_ref \ vm_attributes" where + "ptable_attrs_s s \ ptable_attrs (cur_thread s) s" + +definition ptable_xn_s where + "ptable_xn_s s \ \addr. 
Execute \ ptable_attrs_s s addr" + + +type_synonym user_state_if = "user_context \ user_mem \ device_state" + +text \ + A user transition gives back a possible event that is the next + event the user wants to perform +\ +type_synonym user_transition_if = + "obj_ref \ vm_mapping \ mem_rights \ (obj_ref \ bool) \ + user_state_if \ (event option \ user_state_if) set" + + +definition do_user_op_if :: + "user_transition_if \ user_context \ (event option \ user_context,'z::state_ext) s_monad" where + "do_user_op_if uop tc = + do + \ \Get the page rights of each address (ReadOnly, ReadWrite, None, etc).\ + pr \ gets ptable_rights_s; + + \ \Fetch the execute bits of the current thread's page mappings.\ + pxn \ gets (\s x. pr x \ {} \ ptable_xn_s s x); + + \ \Get the mapping from virtual to physical addresses.\ + pl \ gets (\s. restrict_map (ptable_lift_s s) {x. pr x \ {}}); + + allow_read \ return {y. EX x. pl x = Some y \ AllowRead \ pr x}; + allow_write \ return {y. EX x. pl x = Some y \ AllowWrite \ pr x}; + + \ \Get the current thread.\ + t \ gets cur_thread; + + \ \Generate user memory by throwing away anything from global + memory that the user doesn't have access to. (The user must + have both (1) a mapping to the page; (2) that mapping has the + AllowRead right.\ + um \ gets (\s. (user_mem s) \ ptrFromPAddr); + dm \ gets (\s. (device_mem s) \ ptrFromPAddr); + ds \ gets (device_state \ machine_state); + + \ \Non-deterministically execute one of the user's operations.\ + u \ return (uop t pl pr pxn (tc, um|`allow_read, (ds \ ptrFromPAddr)|` allow_read)); + assert (u \ {}); + (e,(tc',um',ds')) \ select u; + + \ \Update the changes the user made to memory into our model. + We ignore changes that took place where they didn't have + write permissions. 
(uop shouldn't be doing that --- if it is, + uop isn't correctly modelling real hardware.)\ + do_machine_op (user_memory_update (((um' |` allow_write) \ addrFromPPtr) |` (-(dom ds)))); + + do_machine_op (device_memory_update (((ds' |` allow_write) \ addrFromPPtr) |` (dom ds))); + + return (e,tc') + od" + + +named_theorems UserOp_IF_assms + +lemma arch_globals_equiv_underlying_memory_update[UserOp_IF_assms, simp]: + "arch_globals_equiv ct it kh kh' as as' (underlying_memory_update f ms) ms' = + arch_globals_equiv ct it kh kh' as as' ms ms'" + "arch_globals_equiv ct it kh kh' as as' ms (underlying_memory_update f ms') = + arch_globals_equiv ct it kh kh' as as' ms ms'" + by auto + +lemma arch_globals_equiv_device_state_update[UserOp_IF_assms, simp]: + "arch_globals_equiv ct it kh kh' as as' (device_state_update f ms) ms' = + arch_globals_equiv ct it kh kh' as as' ms ms'" + "arch_globals_equiv ct it kh kh' as as' ms (device_state_update f ms') = + arch_globals_equiv ct it kh kh' as as' ms ms'" + by auto + +end + + +requalify_types AARCH64.user_transition_if + +global_interpretation UserOp_IF_1?: UserOp_IF_1 +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact UserOp_IF_assms)?) +qed + + +context Arch begin global_naming AARCH64 + +lemma requiv_get_pt_of_thread_eq: + "\ reads_equiv aag s s'; pas_refined aag s; is_subject aag (cur_thread s); + pt_ref \ arm_us_global_vspace (arch_state s); pt_ref' \ arm_us_global_vspace (arch_state s'); + get_vspace_of_thread (kheap s) (arch_state s) (cur_thread s) = pt_ref; + get_vspace_of_thread (kheap s') (arch_state s') (cur_thread s') = pt_ref' \ + \ pt_ref = pt_ref'" + apply (erule reads_equivE) + apply (erule equiv_forE) + apply (subgoal_tac "aag_can_read aag (cur_thread s)") + apply (clarsimp simp: get_vspace_of_thread_eq) + apply simp + done + +lemma requiv_get_pt_entry_eq: + "\ reads_equiv aag s t; invs s; pas_refined aag s; is_subject aag pt; vref \ user_region; + \asid vref. 
vs_lookup_table max_pt_level asid vref s = Some (max_pt_level, pt) \ + \ pt_lookup_slot pt vref (ptes_of s) = pt_lookup_slot pt vref (ptes_of t)" + apply (clarsimp simp: pt_lookup_slot_def) + apply (clarsimp simp: pt_lookup_slot_from_level_def) + apply (frule_tac pt=pt and vptr=vref in pt_walk_reads_equiv[where bot_level=0]) + apply clarsimp+ + apply (fastforce simp: reads_equiv_f_def) + apply (fastforce elim: vs_lookup_table_vref_independent) + apply (clarsimp simp: obind_def split: option.splits) + done + +lemma requiv_get_page_info_eq: + "\ reads_equiv aag s s'; pas_refined aag s; invs s; is_subject aag pt; + \asid. vs_lookup_table max_pt_level asid x s = Some (max_pt_level, pt) \ + \ get_page_info (aobjs_of s) pt x = get_page_info (aobjs_of s') pt x" + apply (clarsimp simp: get_page_info_def obind_def) + apply (subgoal_tac "pt_lookup_slot pt x (ptes_of s) = pt_lookup_slot pt x (ptes_of s')") + apply (clarsimp split: option.splits) + apply (case_tac "pt_lookup_slot pt x (ptes_of s') = Some (a, b)"; clarsimp) + apply (frule_tac ptr=b in ptes_of_reads_equiv[rotated]) + apply (clarsimp simp: pt_lookup_slot_def) + apply (rule_tac pt_lookup_slot_from_level_is_subject) + apply fastforce+ + apply (rule requiv_get_pt_entry_eq; fastforce) + done + +lemma requiv_vspace_of_thread_global_pt: + "\ reads_equiv aag s s'; is_subject aag (cur_thread s); invs s; pas_refined aag s; + get_vspace_of_thread (kheap s) (arch_state s) (cur_thread s) = global_pt s \ + \ get_vspace_of_thread (kheap s') (arch_state s') (cur_thread s') = global_pt s'" + apply (erule reads_equivE) + apply (erule equiv_forE) + apply (prop_tac "aag_can_read aag (cur_thread s)", simp) + apply (clarsimp simp: get_vspace_of_thread_def + split: option.split kernel_object.splits cap.splits arch_cap.splits pt_type.splits) + apply (rename_tac tcb pt asid vref) + apply (subgoal_tac "aag_can_read_asid aag asid") + apply (subgoal_tac "s \ ArchObjectCap (PageTableCap pt VSRootPT_T (Some (asid,vref)))") + apply (clarsimp 
simp: equiv_asids_def equiv_asid_def valid_cap_def obind_def + vspace_for_asid_def vspace_for_pool_def pool_for_asid_def) + apply (clarsimp simp: word_gt_0 typ_at_eq_kheap_obj) + apply (drule_tac x=asid in spec) + apply (case_tac "asid = 0"; clarsimp) + apply (clarsimp simp: asid_pools_of_ko_at obj_at_def) + apply (clarsimp simp: asid_low_bits_of_def opt_map_def + entry_for_asid_def entry_for_pool_def pool_for_asid_def obind_def + split: option.splits) + apply (drule invs_valid_global_refs) + apply (drule_tac ptr="((cur_thread s), tcb_cnode_index 1)" in valid_global_refsD2[rotated]) + apply (subst caps_of_state_tcb_cap_cases) + apply (simp add: get_tcb_def) + apply (simp add: dom_tcb_cap_cases[simplified]) + apply simp + apply (simp add: cap_range_def global_refs_def) + apply (cut_tac s=s and t="cur_thread s" and tcb=tcb in objs_valid_tcb_vtable) + apply (fastforce simp: invs_valid_objs get_tcb_def)+ + apply (subgoal_tac "(pasObjectAbs aag (cur_thread s), Control, pasASIDAbs aag asid) + \ state_asids_to_policy aag s") + apply (frule pas_refined_Control_into_is_subject_asid) + apply (fastforce simp: pas_refined_def) + apply simp + apply (cut_tac aag=aag and ptr="(cur_thread s, tcb_cnode_index 1)" in sata_asid) + prefer 3 + apply (simp add: caps_of_state_tcb_cap_cases get_tcb_def dom_tcb_cap_cases[simplified])+ + done + +lemma vspace_for_asid_get_vspace_of_thread: + "get_vspace_of_thread (kheap s) (arch_state s) ct \ global_pt s + \ \asid. 
vspace_for_asid asid s = Some (get_vspace_of_thread (kheap s) (arch_state s) ct)" + by (fastforce simp: get_vspace_of_thread_def + split: option.splits kernel_object.splits cap.splits arch_cap.splits pt_type.splits) + +lemma pt_of_thread_same_agent: + "\ pas_refined aag s; is_subject aag tcb_ptr; + get_vspace_of_thread (kheap s) (arch_state s) tcb_ptr = pt; pt \ global_pt s \ + \ pasObjectAbs aag tcb_ptr = pasObjectAbs aag pt" + apply (rule_tac aag="pasPolicy aag" in aag_wellformed_Control[rotated]) + apply (fastforce simp: pas_refined_def) + apply (rule pas_refined_mem[rotated], simp) + apply (clarsimp simp: get_vspace_of_thread_eq) + apply (cut_tac ptr="(tcb_ptr, tcb_cnode_index 1)" in sbta_caps) + prefer 4 + apply (simp add: state_objs_to_policy_def) + apply (subst caps_of_state_tcb_cap_cases) + apply (simp add: get_tcb_def) + apply (simp add: dom_tcb_cap_cases[simplified]) + apply simp + apply (simp add: obj_refs_def) + apply (simp add: cap_auth_conferred_def arch_cap_auth_conferred_def) + done + +lemma requiv_ptable_rights_eq: + "\ reads_equiv aag s s'; pas_refined aag s; pas_refined aag s'; + is_subject aag (cur_thread s); invs s; invs s' \ + \ ptable_rights_s s = ptable_rights_s s'" + apply (simp add: ptable_rights_s_def) + apply (rule ext) + apply (case_tac "get_vspace_of_thread (kheap s) (arch_state s) (cur_thread s) = global_pt s") + apply (frule requiv_vspace_of_thread_global_pt) + apply (auto)[4] + apply (fastforce dest: get_page_info_gpd_kmaps[rotated 3] + simp: ptable_rights_def invs_valid_global_objs invs_arch_state + split: option.splits)+ + apply (case_tac "get_vspace_of_thread (kheap s') (arch_state s') (cur_thread s') = global_pt s'") + apply (drule reads_equiv_sym) + apply (frule requiv_vspace_of_thread_global_pt; fastforce simp: reads_equiv_def) + apply (simp add: ptable_rights_def) + apply (frule requiv_get_pt_of_thread_eq) + apply (auto)[6] + apply (frule pt_of_thread_same_agent, simp+) + apply (subst requiv_get_page_info_eq, simp+) + apply 
(drule sym[where s="get_vspace_of_thread _ _ _"], clarsimp) + apply (fastforce dest: get_vspace_of_thread_reachable elim: vs_lookup_table_vref_independent)+ + done + +lemma requiv_ptable_attrs_eq: + "\ reads_equiv aag s s'; pas_refined aag s; pas_refined aag s'; + is_subject aag (cur_thread s); invs s; invs s'; ptable_rights_s s x \ {} \ + \ ptable_attrs_s s x = ptable_attrs_s s' x" + apply (simp add: ptable_attrs_s_def ptable_rights_s_def) + apply (case_tac "get_vspace_of_thread (kheap s) (arch_state s) (cur_thread s) = + arm_us_global_vspace (arch_state s)") + apply (frule requiv_vspace_of_thread_global_pt) + apply (fastforce+)[4] + apply (clarsimp simp: ptable_attrs_def split: option.splits) + apply (rule conjI) + apply clarsimp + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply clarsimp + apply (rule conjI) + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply clarsimp + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply (case_tac "get_vspace_of_thread (kheap s') (arch_state s') (cur_thread s') = + arm_us_global_vspace (arch_state s')") + apply (drule reads_equiv_sym) + apply (frule requiv_vspace_of_thread_global_pt) + apply ((fastforce simp: reads_equiv_def)+)[5] + apply (simp add: ptable_attrs_def) + apply (frule requiv_get_pt_of_thread_eq, simp+)[1] + apply (frule pt_of_thread_same_agent, simp+)[1] + apply (subst requiv_get_page_info_eq, simp+) + apply (drule sym[where s="get_vspace_of_thread _ _ _"], clarsimp) + apply (fastforce dest: get_vspace_of_thread_reachable elim: vs_lookup_table_vref_independent)+ + done + +lemma requiv_ptable_lift_eq: + "\ reads_equiv aag s s'; pas_refined aag s; pas_refined aag s'; invs s; + invs s'; is_subject aag (cur_thread s); ptable_rights_s s x \ {} \ + \ ptable_lift_s s x = ptable_lift_s s' x" + 
apply (simp add: ptable_lift_s_def ptable_rights_s_def) + apply (case_tac "get_vspace_of_thread (kheap s) (arch_state s) (cur_thread s) = + arm_us_global_vspace (arch_state s)") + apply (frule requiv_vspace_of_thread_global_pt) + apply (fastforce+)[4] + apply (clarsimp simp: ptable_lift_def split: option.splits) + apply (rule conjI) + apply clarsimp + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply clarsimp + apply (rule conjI) + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply clarsimp + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply ((fastforce simp: invs_valid_global_objs invs_arch_state)+)[4] + apply (case_tac "get_vspace_of_thread (kheap s') (arch_state s') (cur_thread s') = + arm_us_global_vspace (arch_state s')") + apply (drule reads_equiv_sym) + apply (frule requiv_vspace_of_thread_global_pt) + apply ((fastforce simp: reads_equiv_def)+)[5] + apply (simp add: ptable_lift_def) + apply (frule requiv_get_pt_of_thread_eq, simp+)[1] + apply (frule pt_of_thread_same_agent, simp+)[1] + apply (subst requiv_get_page_info_eq, simp+) + apply (drule sym[where s="get_vspace_of_thread _ _ _"], clarsimp) + apply (fastforce dest: get_vspace_of_thread_reachable elim: vs_lookup_table_vref_independent)+ + done + +lemma requiv_ptable_xn_eq: + "\ reads_equiv aag s s'; pas_refined aag s; pas_refined aag s'; + is_subject aag (cur_thread s); invs s; invs s'; ptable_rights_s s x \ {} \ + \ ptable_xn_s s x = ptable_xn_s s' x" + by (simp add: ptable_xn_s_def requiv_ptable_attrs_eq) + +lemma data_at_obj_range: + "\ data_at sz ptr s; pspace_aligned s; valid_objs s \ + \ ptr + (offset && mask (pageBitsForSize sz)) \ obj_range ptr (ArchObj (DataPage dev sz))" + apply (clarsimp simp: data_at_def) + apply (elim disjE) + apply (clarsimp simp: obj_at_def) + apply (drule (2) ptr_in_obj_range) + apply (clarsimp simp: 
obj_bits_def obj_range_def) + apply fastforce + apply (clarsimp simp: obj_at_def) + apply (drule (2) ptr_in_obj_range) + apply (clarsimp simp: obj_bits_def obj_range_def) + apply fastforce + done + +lemma obj_range_data_for_cong: + "obj_range ptr (ArchObj (DataPage dev sz')) = obj_range ptr (ArchObj (DataPage False sz'))" + by (simp add: obj_range_def) + +lemma pspace_distinct_def': + "pspace_distinct \ + \s. \x y ko ko'. kheap s x = Some ko \ kheap s y = Some ko' \ x \ y + \ obj_range x ko \ obj_range y ko' = {}" + by (auto simp: pspace_distinct_def obj_range_def field_simps) + +lemma data_at_disjoint_equiv: + "\ ptr' \ ptr;data_at sz' ptr' s; data_at sz ptr s; valid_objs s; pspace_aligned s; + pspace_distinct s; ptr' \ obj_range ptr (ArchObj (DataPage dev sz)) \ + \ False" + apply (frule (2) data_at_obj_range[where offset = 0,simplified]) + apply (clarsimp simp: data_at_def obj_at_def) + apply (elim disjE) + by (clarsimp dest!: spec simp: obj_at_def pspace_distinct_def' + , erule impE, erule conjI2[OF conjI2], (fastforce+)[2] + , fastforce cong: obj_range_data_for_cong)+ + +lemma is_aligned_pptrBaseOffset: + "is_aligned pptrBaseOffset (pageBitsForSize sz)" + by (case_tac sz, simp_all add: pptrBaseOffset_def paddrBase_def pageBits_def canonical_bit_def + ptTranslationBits_def pptrBase_def is_aligned_def) + +lemma ptrFromPAddr_mask_simp: + "ptrFromPAddr z && ~~ mask (pageBitsForSize l) = + ptrFromPAddr (z && ~~ mask (pageBitsForSize l))" + apply (simp add: ptrFromPAddr_def field_simps) + apply (subst mask_out_add_aligned[OF is_aligned_pptrBaseOffset]) + apply simp + done + +lemma pageBitsForSize_le_canonical_bit: + "pageBitsForSize sz \ canonical_bit" + by (cases sz, simp_all add: pageBits_def ptTranslationBits_def canonical_bit_def) + +lemma data_at_same_size: + assumes dat_sz': + "data_at sz' (ptrFromPAddr base) s" + and dat_sz: + "data_at sz + (ptrFromPAddr (base + (x && mask (pageBitsForSize sz'))) && ~~ mask (pageBitsForSize sz)) s" + and vs: + 
"pspace_distinct s" "pspace_aligned s" "valid_objs s" + shows "sz' = sz" +proof - + from dat_sz' and dat_sz + have trivial: + "sz' \ sz + \ ptrFromPAddr (base + (x && mask (pageBitsForSize sz'))) && ~~ mask (pageBitsForSize sz) \ + ptrFromPAddr base" + by (auto simp: data_at_def obj_at_def) + have sz_equiv: "(pageBitsForSize sz = pageBitsForSize sz') = (sz' = sz)" + by (clarsimp simp: pageBitsForSize_def ptTranslationBits_def split: vmpage_size.splits) + show ?thesis + apply (rule sz_equiv[THEN iffD1]) + apply (rule ccontr) + apply (drule neq_iff[THEN iffD1]) + using dat_sz' dat_sz vs + apply (cut_tac trivial) prefer 2 + apply (fastforce simp: sz_equiv) + apply (frule(1) data_at_aligned) + apply (elim disjE) + apply (erule(5) data_at_disjoint_equiv) + apply (unfold obj_range_def) + apply (rule mask_in_range[THEN iffD1]) + apply (simp add: obj_bits_def)+ + apply (simp add: mask_lower_twice ptrFromPAddr_mask_simp) + apply (rule arg_cong[where f = ptrFromPAddr]) + apply (subgoal_tac "is_aligned base (pageBitsForSize sz')") + apply (subst neg_mask_add_aligned[OF _ and_mask_less']) + apply simp + apply (fastforce simp: pbfs_less_wb'[unfolded word_bits_def,simplified]) + apply simp + apply (metis is_aligned_addD2 is_aligned_pptrBaseOffset ptrFromPAddr_def) + apply (drule not_sym) + apply (erule(5) data_at_disjoint_equiv) + apply (unfold obj_range_def) + apply (rule mask_in_range[THEN iffD1]) + apply (simp add: obj_bits_def is_aligned_neg_mask)+ + apply (simp add: mask_lower_twice ptrFromPAddr_mask_simp) + apply (rule arg_cong[where f = ptrFromPAddr]) + apply (subgoal_tac "is_aligned base (pageBitsForSize sz')") + apply (rule sym) + apply (subst mask_lower_twice[symmetric]) + apply (erule less_imp_le_nat) + apply (rule arg_cong[where f = "\x. 
x && ~~ mask z" for z]) + apply (subst neg_mask_add_aligned[OF _ and_mask_less']) + apply simp + apply (fastforce simp: pbfs_less_wb'[unfolded word_bits_def,simplified]) + apply simp + apply (metis is_aligned_addD2 is_aligned_pptrBaseOffset ptrFromPAddr_def) + done +qed + +lemma level_le_2_cases: + "(level :: vm_level) \ 2 \ level = 0 \ level = 1 \ level = 2" + apply clarsimp + apply (erule_tac P="level=2" in swap) + apply (subst (asm) order.order_iff_strict) + apply (erule disjE_R) + apply (clarsimp simp: order.strict_implies_not_eq) + apply (induct level; clarsimp) + apply (drule meta_mp) + apply (erule order.strict_implies_not_eq) + apply (drule meta_mp) + apply (rule bit1.minus_one_leq_less) + apply (erule order.strict_implies_order) + apply (erule bit1.zero_least) + apply clarsimp + apply clarsimp + done + +(* FIXME AARCH64 IF: move *) +lemma canonical_vref_for_levelI: + "\ canonical_address vref; vref < pptr_base \ \ canonical_address (vref_for_level vref level)" + using pt_bits_left_bound[of level] + apply (simp add: canonical_address_def canonical_address_of_def vref_for_level_def + bit_simps) + apply word_bitwise + by (clarsimp simp: canonical_bit_def word_size not_less) + +(* FIXME AARCH64 IF: move *) +lemma pt_bits_left_le_canonical: + "level \ max_pt_level \ pt_bits_left level \ canonical_bit" + apply (drule pt_bits_left_le_max_pt_level) + apply (auto simp add: canonical_bit_def bit_simps split: if_splits) + apply (simp add: Kernel_Config.config_ARM_PA_SIZE_BITS_40_def)+ + apply (case_tac level; clarsimp) + apply (case_tac z; clarsimp) + apply (case_tac n; clarsimp) + apply (clarsimp simp: pt_bits_left_def pageBits_def) + apply (rename_tac n) + apply (case_tac n; clarsimp) + apply (clarsimp simp: pt_bits_left_def pageBits_def ptTranslationBits_def max_pt_level_def split: if_splits) + apply (rename_tac n) + apply (case_tac n; clarsimp) + apply (clarsimp simp: pt_bits_left_def pageBits_def ptTranslationBits_def max_pt_level_def asid_pool_level_def split: 
if_splits) + apply (rename_tac n) + apply (case_tac n; clarsimp) + apply (clarsimp simp: pt_bits_left_def pageBits_def ptTranslationBits_def max_pt_level_def asid_pool_level_def split: if_splits) + apply (clarsimp simp: pt_bits_left_def pageBits_def ptTranslationBits_def max_pt_level_def asid_pool_level_def pt_bits_left_bound_def split: if_splits) + done + +lemma ptable_lift_data_consistant: + assumes vs: "valid_state s" + and pt_lift: "ptable_lift t s x = Some ptr" + and dat: "data_at sz ((ptrFromPAddr ptr) && ~~ mask (pageBitsForSize sz)) s" + and misc: "get_vspace_of_thread (kheap s) (arch_state s) t \ arm_us_global_vspace (arch_state s)" + shows "ptable_lift t s (x && ~~ mask (pageBitsForSize sz)) = + Some (ptr && ~~ mask (pageBitsForSize sz))" +proof - + have vs': "valid_objs s \ valid_arch_state s \ valid_vspace_objs s + \ pspace_distinct s \ pspace_aligned s" + using vs by (simp add: valid_state_def valid_pspace_def) + thus ?thesis + using pt_lift dat vs' + apply (clarsimp simp: ptable_lift_def split: option.splits) + apply (clarsimp simp: get_page_info_def simp: obind_def split: option.splits) + apply (rule exE[OF vspace_for_asid_get_vspace_of_thread[OF misc(1)]]) + apply (rename_tac level pt pde asid) + apply (case_tac pde; clarsimp simp: pte_info_def) + apply (frule pt_lookup_slot_max_pt_level) + apply (frule vspace_for_asid_vs_lookup) + apply (clarsimp split: if_splits) + apply (frule_tac level=level in valid_vspace_objs_pte) + apply clarsimp + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def) + apply (fastforce simp: table_base_pt_slot_offset[OF vs_lookup_table_is_aligned] + dest: valid_arch_state_asid_table dest!: pt_lookup_vs_lookupI + intro: vs_lookup_level) + apply (erule disjE[OF _ _ FalseE]) + prefer 2 + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad pt_walk.simps) + apply (clarsimp split: if_splits) + apply (fastforce dest: pt_walk_max_level simp: max_pt_level_def2) + apply clarsimp + apply 
(clarsimp simp: valid_pte_def) + apply (frule data_at_same_size[symmetric]; simp?) + apply (simp add: pageBitsForSize_pt_bits_left) + apply (fold vref_for_level_def) + apply (prop_tac "level_of_vmsize (vmsize_of_level level) = level") + apply (metis data_at_level pageBitsForSize_pt_bits_left) + apply clarsimp + apply (prop_tac "pt_lookup_slot (get_vspace_of_thread (kheap s) (arch_state s) t) + (vref_for_level x level) (ptes_of s) = Some (level, pt)") + apply (case_tac "level = max_pt_level"; clarsimp?) + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad) + apply (subst (asm) pt_lookup_vs_lookup_eq) + apply clarsimp + apply (clarsimp simp: vspace_for_asid_def) + apply (clarsimp simp: pt_walk.simps) + apply (fastforce dest: pt_walk_max_level simp: max_pt_level_def2 in_omonad) + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad) + apply (rule exI) + apply (subst pt_walk_vref_for_level_eq[where vref'=x]) + apply clarsimp + (* FIXME AARCH64 IF *) + subgoal sorry (* apply (fastforce dest: level_le_2_cases le_neq_trans simp: max_pt_level_def2 max_def) *) + apply clarsimp + apply fastforce + using vref_for_level_user_region + apply (fastforce simp: vref_for_level_def is_aligned_mask_out_add_eq mask_AND_NOT_mask + pt_bits_left_le_canonical is_aligned_ptrFromPAddr_n_eq + elim: canonical_vref_for_levelI[unfolded vref_for_level_def] + dest: data_at_aligned) + done +qed + +(* FIXME AARCH64 IF: replace original *) +lemma valid_vspace_objs_pte: + "\ ptes_of s pt_t p = Some pte; valid_vspace_objs s; \\ (level, table_base pt_t p) s \ + \ valid_pte level pte s" + apply (clarsimp simp: ptes_of_def in_opt_map_eq) + apply (drule (2) valid_vspace_objsD) + apply (fastforce simp: in_opt_map_eq) + apply simp + done + +lemma ptable_rights_data_consistant: + assumes vs: "valid_state s" + and pt_lift: "ptable_lift t s x = Some ptr" + and dat: "data_at sz ((ptrFromPAddr ptr) && ~~ mask (pageBitsForSize sz)) s" + and misc: 
"get_vspace_of_thread (kheap s) (arch_state s) t \ + arm_us_global_vspace (arch_state s)" + shows "ptable_rights t s (x && ~~ mask (pageBitsForSize sz)) = ptable_rights t s x" +proof - + have vs': "valid_objs s \ valid_arch_state s \ valid_vspace_objs s + \ pspace_distinct s \ pspace_aligned s" + using vs by (simp add: valid_state_def valid_pspace_def) + thus ?thesis + using pt_lift dat vs' + apply (clarsimp simp: ptable_rights_def ptable_lift_def split: option.splits) + apply (clarsimp simp: get_page_info_def simp: obind_def split: option.splits if_splits) + apply (rule exE[OF vspace_for_asid_get_vspace_of_thread[OF misc(1)]]) + apply (rename_tac level pt pde asid) + apply (case_tac pde; clarsimp simp: pte_info_def) + apply (frule pt_lookup_slot_max_pt_level) + apply (frule vspace_for_asid_vs_lookup) + apply (frule_tac level=level in valid_vspace_objs_pte) + apply clarsimp + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def) + apply (fastforce simp: table_base_pt_slot_offset[OF vs_lookup_table_is_aligned] + dest: valid_arch_state_asid_table dest!: pt_lookup_vs_lookupI + intro: vs_lookup_level) + apply (clarsimp simp: valid_pte_def) + apply (frule data_at_same_size[symmetric]; simp?) + apply (simp add: pageBitsForSize_pt_bits_left) + apply (prop_tac "level_of_vmsize (vmsize_of_level level) = level") + apply (metis data_at_level pageBitsForSize_pt_bits_left) + apply clarsimp + apply (fold vref_for_level_def) + apply (prop_tac "pt_lookup_slot (get_vspace_of_thread (kheap s) (arch_state s) t) + (vref_for_level x level) (ptes_of s) = Some (level, pt)") + apply (case_tac "level = max_pt_level"; clarsimp?) 
+ apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad) + apply (subst (asm) pt_lookup_vs_lookup_eq) + apply clarsimp + apply (clarsimp simp: vspace_for_asid_def) + apply (clarsimp simp: pt_walk.simps) + apply (fastforce dest: pt_walk_max_level simp: max_pt_level_def2 in_omonad) + apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad) + apply (rule exI) + apply (subst pt_walk_vref_for_level_eq[where vref'=x]) + (* FIXME AARCH64 IF *) + subgoal sorry (* apply (fastforce dest: level_le_2_cases le_neq_trans simp: max_pt_level_def2 max_def) *) + apply clarsimp + apply fastforce + apply (rule conjI) + apply (fastforce dest: canonical_vref_for_levelI[unfolded vref_for_level_def] + simp: pt_bits_left_le_canonical) + apply clarsimp + using vref_for_level_user_region by fastforce +qed + + +lemma user_op_access_data_at: + "\ invs s; pas_refined aag s; is_subject aag tcb; ptable_lift tcb s x = Some ptr; + data_at sz ((ptrFromPAddr ptr) && ~~ mask (pageBitsForSize sz)) s; + auth \ vspace_cap_rights_to_auth (ptable_rights tcb s x) \ + \ (pasObjectAbs aag tcb, auth, + pasObjectAbs aag (ptrFromPAddr (ptr && ~~ mask (pageBitsForSize sz)))) \ pasPolicy aag" + apply (case_tac "get_vspace_of_thread (kheap s) (arch_state s) tcb = arm_us_global_vspace (arch_state s)") + apply (clarsimp simp: ptable_lift_def ptable_rights_def split: option.splits) + apply (frule get_page_info_gpd_kmaps[rotated 3]) + apply (fastforce simp: invs_valid_global_objs invs_arch_state)+ + apply (frule (1) ptable_lift_data_consistant[rotated 2]) + apply fastforce + apply fastforce + apply (frule (1) ptable_rights_data_consistant[rotated 2]) + apply fastforce + apply fastforce + apply (erule (3) user_op_access) + apply simp + done + +lemma user_frame_at_equiv: + "\ typ_at (AArch (AUserData sz)) p s; equiv_for P kheap s s'; P p \ + \ typ_at (AArch (AUserData sz)) p s'" + by (clarsimp simp: equiv_for_def obj_at_def) + +lemma device_frame_at_equiv: + "\ typ_at 
(AArch (ADeviceData sz)) p s; equiv_for P kheap s s'; P p \ + \ typ_at (AArch (ADeviceData sz)) p s'" + by (clarsimp simp: equiv_for_def obj_at_def) + +lemma typ_at_user_data_at: + "typ_at (AArch (AUserData sz)) p s \ data_at sz p s" + by (simp add: data_at_def) + +lemma typ_at_device_data_at: + "typ_at (AArch (ADeviceData sz)) p s \ data_at sz p s" + by (simp add: data_at_def) + +lemma requiv_device_mem_eq: + "\ reads_equiv aag s s'; globals_equiv s s'; invs s; invs s'; + is_subject aag (cur_thread s); AllowRead \ ptable_rights_s s x; + ptable_lift_s s x = Some y; pas_refined aag s; pas_refined aag s' \ + \ device_mem s (ptrFromPAddr y) = device_mem s' (ptrFromPAddr y)" + apply (simp add: device_mem_def) + apply (rule conjI) + apply (erule reads_equivE) + apply (clarsimp simp: in_device_frame_def) + apply (rule exI) + apply (rule device_frame_at_equiv) + apply assumption+ + apply (erule_tac f="underlying_memory" in equiv_forE) + apply (frule_tac auth=Read in user_op_access_data_at[where s=s]) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def + | intro typ_at_device_data_at)+ + apply (rule reads_read) + apply (fastforce simp: ptrFromPAddr_mask_simp) + apply clarsimp + apply (frule requiv_ptable_rights_eq, fastforce+) + apply (frule requiv_ptable_lift_eq, fastforce+) + apply (clarsimp simp: globals_equiv_def) + apply (erule notE) + apply (erule reads_equivE) + apply (clarsimp simp: in_device_frame_def) + apply (rule exI) + apply (rule device_frame_at_equiv) + apply assumption+ + apply (erule_tac f="underlying_memory" in equiv_forE) + apply (erule equiv_symmetric[THEN iffD1]) + apply (frule_tac auth=Read in user_op_access_data_at[where s=s']) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def + | intro typ_at_device_data_at)+ + apply (rule reads_read) + apply (fastforce simp: ptrFromPAddr_mask_simp) + done + +lemma requiv_user_mem_eq: + "\ reads_equiv aag s s'; globals_equiv s s'; 
invs s; invs s'; + is_subject aag (cur_thread s); AllowRead \ ptable_rights_s s x; + ptable_lift_s s x = Some y; pas_refined aag s; pas_refined aag s' \ + \ user_mem s (ptrFromPAddr y) = user_mem s' (ptrFromPAddr y)" + apply (simp add: user_mem_def) + apply (rule conjI) + apply clarsimp + apply (rule context_conjI') + apply (erule reads_equivE) + apply (clarsimp simp: in_user_frame_def) + apply (rule exI) + apply (rule user_frame_at_equiv) + apply assumption+ + apply (erule_tac f="underlying_memory" in equiv_forE) + apply (frule_tac auth=Read in user_op_access_data_at[where s = s]) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def + | intro typ_at_user_data_at)+ + apply (rule reads_read) + apply (fastforce simp: ptrFromPAddr_mask_simp) + apply clarsimp + apply (subgoal_tac "aag_can_read aag (ptrFromPAddr y)") + apply (erule reads_equivE) + apply clarsimp + apply (erule_tac f="underlying_memory" in equiv_forE) + apply simp + apply (frule_tac auth=Read in user_op_access) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def)+ + apply (rule reads_read) + apply simp + apply (frule requiv_ptable_rights_eq, fastforce+) + apply (frule requiv_ptable_lift_eq, fastforce+) + apply (clarsimp simp: globals_equiv_def) + apply (erule notE) + apply (erule reads_equivE) + apply (clarsimp simp: in_user_frame_def) + apply (rule exI) + apply (rule user_frame_at_equiv) + apply assumption+ + apply (erule_tac f="underlying_memory" in equiv_forE) + apply (erule equiv_symmetric[THEN iffD1]) + apply (frule_tac auth=Read in user_op_access_data_at[where s=s']) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def + | intro typ_at_user_data_at)+ + apply (rule reads_read) + apply (fastforce simp: ptrFromPAddr_mask_simp) + done + +lemma ptable_rights_imp_frameD: + "\ ptable_lift t s x = Some y;valid_state s;ptable_rights t s x \ {} \ + \ \sz. 
data_at sz (ptrFromPAddr y && ~~ mask (pageBitsForSize sz)) s" + apply (subst (asm) addrFromPPtr_ptrFromPAddr_id[symmetric]) + apply (drule ptable_rights_imp_frame) + apply simp+ + apply (rule addrFromPPtr_ptrFromPAddr_id[symmetric]) + apply (auto simp: in_user_frame_def in_device_frame_def + dest!: spec typ_at_user_data_at typ_at_device_data_at) + done + +lemma requiv_user_device_eq: + "\ reads_equiv aag s s'; globals_equiv s s'; invs s; invs s'; + is_subject aag (cur_thread s); AllowRead \ ptable_rights_s s x; + ptable_lift_s s x = Some y; pas_refined aag s; pas_refined aag s' \ + \ device_state (machine_state s) (ptrFromPAddr y) = + device_state (machine_state s') (ptrFromPAddr y)" + apply (simp add: ptable_lift_s_def) + apply (frule ptable_rights_imp_frameD) + apply fastforce + apply (fastforce simp: ptable_rights_s_def) + apply (erule reads_equivE) + apply clarsimp + apply (erule_tac f="device_state" in equiv_forD) + apply (frule_tac auth=Read in user_op_access_data_at[where s = s]) + apply ((fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def + | intro typ_at_user_data_at)+)[6] + apply (rule reads_read) + apply (frule_tac auth=Read in user_op_access) + apply (fastforce simp: ptable_lift_s_def ptable_rights_s_def vspace_cap_rights_to_auth_def)+ + done + + +definition context_matches_state where + "context_matches_state pl pr pxn ms s \ case ms of (um, ds) \ + pl = ptable_lift_s s |` {x. pr x \ {}} \ + pr = ptable_rights_s s \ + pxn = (\x. pr x \ {} \ ptable_xn_s s x) \ + um = (user_mem s \ ptrFromPAddr) |` {y. \x. pl x = Some y \ AllowRead \ pr x} \ + ds = (device_state (machine_state s) \ ptrFromPAddr) |` + {y. \x. pl x = Some y \ AllowRead \ pr x}" + + +lemma do_user_op_reads_respects_g: + notes split_paired_All[simp del] + shows + "(\pl pr pxn tc ms s. P tc s \ context_matches_state pl pr pxn ms s + \ (\x. 
uop (cur_thread s) pl pr pxn (tc, ms) = {x})) + \ reads_respects_g aag l (pas_refined aag and invs and is_subject aag \ cur_thread + and (\s. cur_thread s \ idle_thread s) and P tc) + (do_user_op_if uop tc)" + apply (simp add: do_user_op_if_def) + apply (rule use_spec_ev) + apply (rule spec_equiv_valid_add_asm) + apply (rule spec_equiv_valid_add_rel[OF _ reads_equiv_g_refl]) + apply (rule spec_equiv_valid_add_rel'[OF _ affects_equiv_refl]) + apply (rule spec_equiv_valid_inv_gets[where proj=id,simplified]) + apply (clarsimp simp: reads_equiv_g_def) + apply (rule requiv_ptable_rights_eq,simp+)[1] + apply (rule spec_equiv_valid_inv_gets[where proj=id,simplified]) + apply (rule ext) + apply (clarsimp simp: reads_equiv_g_def) + apply (case_tac "ptable_rights_s st x = {}", simp) + apply simp + apply (rule requiv_ptable_xn_eq,simp+)[1] + apply (rule spec_equiv_valid_inv_gets[where proj=id,simplified]) + apply (subst expand_restrict_map_eq,clarsimp) + apply (clarsimp simp: reads_equiv_g_def) + apply (rule requiv_ptable_lift_eq,simp+)[1] + apply (rule spec_equiv_valid_inv_gets[where proj=id,simplified]) + apply (clarsimp simp: reads_equiv_g_def) + apply (rule requiv_cur_thread_eq,fastforce) + apply (rule spec_equiv_valid_inv_gets_more[where proj="\m. dom m \ cw" + and projsnd="\m. m |` cr" for cr and cw]) + apply (rule context_conjI') + apply (subst expand_restrict_map_eq) + apply (clarsimp simp: reads_equiv_g_def restrict_map_def) + apply (rule requiv_user_mem_eq) + apply simp+ + apply fastforce + apply (rule spec_equiv_valid_inv_gets[where proj = "\x. ()", simplified]) + apply (rule spec_equiv_valid_inv_gets_more[where proj = "\m. 
(m \ ptrFromPAddr) |` cr" for cr]) + apply (rule conjI) + apply (subst expand_restrict_map_eq) + apply (clarsimp simp: restrict_map_def reads_equiv_g_def) + apply (rule requiv_user_device_eq) + apply simp+ + apply (clarsimp simp: globals_equiv_def reads_equiv_g_def) + apply (rule spec_equiv_valid_guard_imp) + apply (wpsimp wp: dmo_user_memory_update_reads_respects_g dmo_device_state_update_reads_respects_g + dmo_device_state_update_reads_respects_g select_ev dmo_wp) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (drule spec)+ + apply (erule impE) + prefer 2 + apply assumption + apply (clarsimp simp: context_matches_state_def comp_def reads_equiv_g_def globals_equiv_def) + apply (clarsimp simp: reads_equiv_g_def globals_equiv_def) + done + +definition valid_vspace_objs_if where + "valid_vspace_objs_if \ \" + +declare valid_vspace_objs_if_def[simp] + +end + +requalify_consts + AARCH64.do_user_op_if + AARCH64.valid_vspace_objs_if + AARCH64.context_matches_state + +requalify_facts + AARCH64.do_user_op_reads_respects_g + +end diff --git a/proof/infoflow/AARCH64/Example_Valid_State.thy b/proof/infoflow/AARCH64/Example_Valid_State.thy new file mode 100644 index 0000000000..b4f1ea7da4 --- /dev/null +++ b/proof/infoflow/AARCH64/Example_Valid_State.thy @@ -0,0 +1,1944 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Example_Valid_State +imports + "ArchNoninterference" + "Lib.Distinct_Cmd" + "AInvs.KernelInit_AI" +begin + +section \Example\ + +(* This example is a classic 'one way information flow' + example, where information is allowed to flow from Low to High, + but not the reverse. We consider a typical scenario where + shared memory and an notification for notifications are used to + implement a ring-buffer. We consider the NTFN to be in the domain of High, + and the shared memory to be in the domain of Low. 
*) + +(* basic machine-level declarations that need to happen outside the locale *) + +consts s0_context :: user_context + +(* define the irqs to come regularly every 10 *) + +axiomatization where + irq_oracle_def: "AARCH64.irq_oracle \ \pos. if pos mod 10 = 0 then 10 else 0" + +context begin interpretation Arch . (*FIXME: arch-split*) + +subsection \We show that the authority graph does not let information flow from High to Low\ + +datatype auth_graph_label = High | Low | IRQ0 + +abbreviation partition_label where + "partition_label x \ OrdinaryLabel x" + +definition Sys1AuthGraph :: "(auth_graph_label subject_label) auth_graph" where + "Sys1AuthGraph \ + {(partition_label High, Read, partition_label Low), + (partition_label Low, Notify, partition_label High), + (partition_label Low, Reset, partition_label High), + (SilcLabel, Read, partition_label Low), + (SilcLabel, Notify, partition_label High), + (SilcLabel, Reset, partition_label High)} + \ {(x, a, y). x = y}" + +lemma subjectReads_Low: + "subjectReads Sys1AuthGraph (partition_label Low) = {partition_label Low}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+) + done + +lemma Low_in_subjectReads_High: + "partition_label Low \ subjectReads Sys1AuthGraph (partition_label High)" + by (simp add: Sys1AuthGraph_def reads_read) + +lemma subjectReads_High: + "subjectReads Sys1AuthGraph (partition_label High) = {partition_label High, partition_label Low}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+) + apply (auto intro: Low_in_subjectReads_High) + done + +lemma subjectReads_IRQ0: + "subjectReads Sys1AuthGraph (partition_label IRQ0) = {partition_label IRQ0}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectReads.induct, (fastforce simp: Sys1AuthGraph_def)+) + done + +lemma High_in_subjectAffects_Low: + "partition_label High \ subjectAffects 
Sys1AuthGraph (partition_label Low)" + apply (rule affects_ep) + apply (simp add: Sys1AuthGraph_def) + apply (rule disjI1, simp+) + done + +lemma subjectAffects_Low: + "subjectAffects Sys1AuthGraph (partition_label Low) = {partition_label Low, partition_label High}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+) + apply (auto intro: affects_lrefl High_in_subjectAffects_Low) + done + +lemma subjectAffects_High: + "subjectAffects Sys1AuthGraph (partition_label High) = {partition_label High}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+) + apply (auto intro: affects_lrefl) + done + +lemma subjectAffects_IRQ0: + "subjectAffects Sys1AuthGraph (partition_label IRQ0) = {partition_label IRQ0}" + apply (rule equalityI) + apply (rule subsetI) + apply (erule subjectAffects.induct, (fastforce simp: Sys1AuthGraph_def)+) + apply (auto intro: affects_lrefl) + done + +lemmas subjectReads = subjectReads_High subjectReads_Low subjectReads_IRQ0 + +lemma partsSubjectAffects_Low: + "partsSubjectAffects Sys1AuthGraph Low = {Partition Low, Partition High}" + by (auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def + subjectReads subjectAffects_Low | case_tac xa, rename_tac xa)+ + +lemma partsSubjectAffects_High: + "partsSubjectAffects Sys1AuthGraph High = {Partition High}" + by (auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def + subjectReads subjectAffects_High | rename_tac xa, case_tac xa)+ + +lemma partsSubjectAffects_IRQ0: + "partsSubjectAffects Sys1AuthGraph IRQ0 = {Partition IRQ0}" + by (auto simp: partsSubjectAffects_def image_def label_can_affect_partition_def + subjectReads subjectAffects_IRQ0 | rename_tac xa, case_tac xa)+ + +lemmas partsSubjectAffects = + partsSubjectAffects_High partsSubjectAffects_Low partsSubjectAffects_IRQ0 + +definition example_policy where + 
"example_policy \ + {(PSched, d) | d. True} \ {(d,e). d = e} \ {(Partition Low, Partition High)}" + +lemma "policyFlows Sys1AuthGraph = example_policy" + apply (rule equalityI) + apply (rule subsetI) + apply (clarsimp simp: example_policy_def) + apply (erule policyFlows.cases) + apply (case_tac l, auto simp: partsSubjectAffects)[1] + apply assumption + apply (rule subsetI) + apply (clarsimp simp: example_policy_def) + apply (elim disjE) + apply (fastforce simp: partsSubjectAffects intro: policy_affects) + apply (fastforce intro: policy_scheduler) + apply (fastforce intro: policyFlows_refl refl_onD) + done + + +subsection \We show there exists a valid initial state associated to the + above authority graph\ + +text \ + +This example (modified from ../access-control/ExampleSystem) is a system Sys1 made +of 2 main components Low and High, connected through an notification NTFN. +Both Low and High contains: + + . one TCB + . one vspace made up of one top-level page table + - one asid pool with a single entry for the corresponding vspace + . each top-level pt contains a single page table, with access to a shared page in memory + Low can read/write to this page, High can only read + . one cspace made up of one cnode + . each cspace contains 4 caps: + one to the tcb + one to the cnode itself + one to the top level page table + one to the asid pool + one to the shared page + one to the second level page table + one to the ntfn + +Low can send to the ntfn while High can receive from it. 
+ +Attempt to ASCII art: + + -------- ---- ---- -------- + | | | | | | | | + V | | V S R | V | V +Low_tcb(3079)-->Low_cnode(6)--->ntfn(9)<---High_cnode(7)<--High_tcb(3080) + | | | | + V | | V +Low_pd(3063)<---------Low_pool High_pool------------> High_pd(3065) + | | + V R/W R V +Low_pt(3072)---------------->shared_page<-----------------High_pt(3077) + + +(the references are derived from the dump of the SAC system) + + +The aim is to be able to prove + + valid_initial_state s0_internal Sys1PAS timer_irq utf + +where Sys1PAS is the label graph defining the AC policy for Sys1 using +the authority graph defined above and s0 is the state of Sys1 described above. + +\ + +subsubsection \Defining the State\ + +definition "ntfn_ptr \ pptr_base + 0x20" + +definition "Low_tcb_ptr \ pptr_base + 0x400" +definition "High_tcb_ptr = pptr_base + 0x800" +definition "idle_tcb_ptr = pptr_base + 0x1000" + +definition "Low_pt_ptr = pptr_base + 0x4000" +definition "High_pt_ptr = pptr_base + 0x5000" + +definition "Low_pd_ptr = pptr_base + 0x7000" +definition "High_pd_ptr = pptr_base + 0x8000" + +definition "Low_pool_ptr = pptr_base + 0x9000" +definition "High_pool_ptr = pptr_base + 0xA000" + +definition "Low_cnode_ptr = pptr_base + 0x10000" +definition "High_cnode_ptr = pptr_base + 0x18000" +definition "Silc_cnode_ptr = pptr_base + 0x20000" +definition "irq_cnode_ptr = pptr_base + 0x28000" + +definition "shared_page_ptr_virt = pptr_base + 0x200000" +definition "shared_page_ptr_phys = addrFromPPtr shared_page_ptr_virt" + +definition "timer_irq \ 10" (* not sure exactly how this fits in *) + +definition "Low_mcp \ 5 :: priority" +definition "Low_prio \ 5 :: priority" +definition "High_mcp \ 5 :: priority" +definition "High_prio \ 5 :: priority" +definition "Low_time_slice \ 0 :: nat" +definition "High_time_slice \ 5 :: nat" +definition "Low_domain \ 0 :: domain" +definition "High_domain \ 1 :: domain" + +lemmas s0_ptr_defs = + Low_pool_ptr_def High_pool_ptr_def Low_cnode_ptr_def 
High_cnode_ptr_def Silc_cnode_ptr_def + ntfn_ptr_def irq_cnode_ptr_def Low_pd_ptr_def High_pd_ptr_def Low_pt_ptr_def High_pt_ptr_def + Low_tcb_ptr_def High_tcb_ptr_def idle_tcb_ptr_def timer_irq_def Low_prio_def High_prio_def + Low_time_slice_def Low_domain_def High_domain_def init_irq_node_ptr_def arm_global_pt_ptr_def + pptr_base_def pptrBase_def canonical_bit_def shared_page_ptr_virt_def + +(* Distinctness proof of kernel pointers. *) + +distinct ptrs_distinct[simp]: + Low_tcb_ptr High_tcb_ptr idle_tcb_ptr ntfn_ptr + Low_pt_ptr High_pt_ptr shared_page_ptr_virt Low_pd_ptr High_pd_ptr + Low_cnode_ptr High_cnode_ptr Low_pool_ptr High_pool_ptr + Silc_cnode_ptr irq_cnode_ptr arm_global_pt_ptr + by (auto simp: s0_ptr_defs) + + +text \We need to define the asids of each pd and pt to ensure that +the object is included in the right ASID-label\ + +definition Low_asid :: asid where + "Low_asid \ 1 << asid_low_bits" + +definition High_asid :: asid where + "High_asid \ 2 << asid_low_bits" + +definition Silc_asid :: asid where + "Silc_asid \ 3 << asid_low_bits" + +distinct asid_high_bits_distinct[simp]: + "asid_high_bits_of Low_asid" + "asid_high_bits_of High_asid" + "asid_high_bits_of Silc_asid" + by (auto simp: asid_high_bits_of_def asid_low_bits_def Low_asid_def High_asid_def Silc_asid_def) + +distinct asids_distinct[simp]: + High_asid Low_asid Silc_asid + by (auto simp: Low_asid_def High_asid_def Silc_asid_def asid_low_bits_def) + + +text \converting a nat to a bool list of size 10 - for the cnodes\ + +definition nat_to_bl :: "nat \ nat \ bool list option" where + "nat_to_bl bits n \ + if n \ 2^bits then None + else Some $ bin_to_bl bits (of_nat n)" + +lemma nat_to_bl_id [simp]: "nat_to_bl (size (x :: (('a::len) word))) (unat x) = Some (to_bl x)" + by (clarsimp simp: nat_to_bl_def to_bl_def le_def word_size) + +definition the_nat_to_bl :: "nat \ nat \ bool list" where + "the_nat_to_bl sz n \ the (nat_to_bl sz (n mod 2^sz))" + +abbreviation (input) the_nat_to_bl_10 :: 
"nat \ bool list" where + "the_nat_to_bl_10 n \ the_nat_to_bl 10 n" + +lemma len_the_nat_to_bl[simp]: + "length (the_nat_to_bl x y) = x" + apply (clarsimp simp: the_nat_to_bl_def nat_to_bl_def) + apply safe + apply (metis le_def mod_less_divisor nat_zero_less_power_iff zero_less_numeral) + apply (clarsimp simp: size_bin_to_bl_aux not_le) + done + +lemma tcb_cnode_index_nat_to_bl [simp]: + "the_nat_to_bl_10 n \ tcb_cnode_index n" + by (clarsimp simp: tcb_cnode_index_def intro!: length_neq) + +lemma mod_less_self [simp]: + "a \ b mod a \ ((a :: nat) = 0)" + by (metis mod_less_divisor nat_neq_iff not_less not_less0) + +lemma split_div_mod: + "a = (b::nat) \ (a div k = b div k \ a mod k = b mod k)" + by (metis mult_div_mod_eq) + +lemma nat_to_bl_eq: + assumes "a < 2 ^ n \ b < 2 ^ n" + shows "nat_to_bl n a = nat_to_bl n b \ a = b" + using assms + apply - + apply (erule disjE_R) + apply (clarsimp simp: nat_to_bl_def) + apply (case_tac "a \ 2 ^ n") + apply (clarsimp simp: nat_to_bl_def) + apply (clarsimp simp: not_le) + apply (induct n arbitrary: a b) + apply (clarsimp simp: nat_to_bl_def) + apply atomize + apply (clarsimp simp: nat_to_bl_def) + apply (erule_tac x="a div 2" in allE) + apply (erule_tac x="b div 2" in allE) + apply (erule impE) + apply (metis power_commutes td_gal_lt zero_less_numeral) + apply (clarsimp simp: bin_last_def zdiv_int) + apply (rule iffI [rotated], clarsimp) + apply (subst (asm) (1 2 3 4) bin_to_bl_aux_alt) + apply (clarsimp simp: mod_eq_dvd_iff) + apply (subst split_div_mod [where k=2]) + apply clarsimp + apply presburger + done + +lemma nat_to_bl_mod_n_eq[simp]: + "nat_to_bl n a = nat_to_bl n b \ ((a = b \ a < 2 ^ n) \ (a \ 2 ^ n \ b \ 2 ^ n))" + apply (rule iffI) + apply (clarsimp simp: not_le) + apply (subst (asm) nat_to_bl_eq, simp) + apply clarsimp + apply (erule disjE) + apply clarsimp + apply (clarsimp simp: nat_to_bl_def) + done + +lemma the_the_eq: + "\ x \ None; y \ None \ \ (the x = the y) = (x = y)" + by auto + +lemma 
the_nat_to_bl_eq [simp]: + "(the_nat_to_bl n a = the_nat_to_bl m b) \ (n = m \ (a mod 2 ^ n = b mod 2 ^ n))" + apply (case_tac "n = m") + apply (clarsimp simp: the_nat_to_bl_def) + apply (subst the_the_eq) + apply (clarsimp simp: nat_to_bl_def) + apply (clarsimp simp: nat_to_bl_def) + apply simp + apply simp + apply (metis len_the_nat_to_bl) + done + +lemma empty_cnode_eq_Some[simp]: + "(empty_cnode n x = Some y) = (length x = n \ y = NullCap)" + by (clarsimp simp: empty_cnode_def, metis) + +lemma empty_cnode_eq_None[simp]: + "(empty_cnode n x = None) = (length x \ n)" + by (clarsimp simp: empty_cnode_def) + +(* FIXME AARCH64 IF + +text \Low's CSpace\ + +definition Low_caps :: cnode_contents where + "Low_caps \ + (empty_cnode 10) + ((the_nat_to_bl_10 1) + \ ThreadCap Low_tcb_ptr, + (the_nat_to_bl_10 2) + \ CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2), + (the_nat_to_bl_10 3) + \ ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0))), + (the_nat_to_bl_10 4) + \ ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid), + (the_nat_to_bl_10 5) + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0))), + (the_nat_to_bl_10 6) + \ ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0))), + (the_nat_to_bl_10 318) + \ NotificationCap ntfn_ptr 0 {AllowSend})" + +definition Low_cnode :: kernel_object where + "Low_cnode \ CNode 10 Low_caps" + +lemma ran_empty_cnode[simp]: + "ran (empty_cnode C) = {NullCap}" + by (auto simp: empty_cnode_def ran_def Ex_list_of_length intro: set_eqI) + +lemma empty_cnode_app[simp]: + "length x = n \ empty_cnode n x = Some NullCap" + by (auto simp: empty_cnode_def) + +lemma in_ran_If[simp]: + "(x \ ran (\n. if P n then A n else B n)) \ + (\n. P n \ A n = Some x) \ (\n. 
\ P n \ B n = Some x)" + by (auto simp: ran_def) + +lemma Low_caps_ran: + "ran Low_caps = + {ThreadCap Low_tcb_ptr, + CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2), + ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0))), + ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0))), + ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0))), + NotificationCap ntfn_ptr 0 {AllowSend}, + NullCap}" + apply (rule equalityI) + apply (clarsimp simp: Low_caps_def fun_upd_def empty_cnode_def split: if_split_asm) + apply (clarsimp simp: Low_caps_def fun_upd_def empty_cnode_def split: if_split_asm cong: conj_cong) + apply (rule exI[where x="the_nat_to_bl_10 0"]) + apply simp + done + + +text \High's Cspace\ + +definition High_caps :: cnode_contents where + "High_caps \ + (empty_cnode 10) + ((the_nat_to_bl_10 1) + \ ThreadCap High_tcb_ptr, + (the_nat_to_bl_10 2) + \ CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2), + (the_nat_to_bl_10 3) + \ ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0))), + (the_nat_to_bl_10 4) + \ ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid), + (the_nat_to_bl_10 5) + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid,0))), + (the_nat_to_bl_10 6) + \ ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0))), + (the_nat_to_bl_10 318) + \ NotificationCap ntfn_ptr 0 {AllowRecv}) " + +definition High_cnode :: kernel_object where + "High_cnode \ CNode 10 High_caps" + +lemma High_caps_ran: + "ran High_caps = + {ThreadCap High_tcb_ptr, + CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2), + ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0))), + ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0))), + ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid,0))), + NotificationCap 
ntfn_ptr 0 {AllowRecv}, + NullCap}" + apply (rule equalityI) + apply (clarsimp simp: High_caps_def ran_def empty_cnode_def split: if_split_asm) + apply (clarsimp simp: High_caps_def ran_def empty_cnode_def split: if_split_asm cong: conj_cong) + apply (rule exI [where x="the_nat_to_bl_10 0"]) + apply simp + done + + +text \We need a copy of boundary crossing caps owned by SilcLabel\ + +definition Silc_caps :: cnode_contents where + "Silc_caps \ + (empty_cnode 10) + ((the_nat_to_bl_10 2) + \ CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2), + (the_nat_to_bl_10 5) + \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0))), + (the_nat_to_bl_10 318) + \ NotificationCap ntfn_ptr 0 {AllowSend} )" + +definition Silc_cnode :: kernel_object where + "Silc_cnode \ CNode 10 Silc_caps" + +lemma Silc_caps_ran: + "ran Silc_caps = + {CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2), + ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0))), + NotificationCap ntfn_ptr 0 {AllowSend}, + NullCap}" + apply (rule equalityI) + apply (clarsimp simp: Silc_caps_def ran_def empty_cnode_def) + apply (clarsimp simp: ran_def Silc_caps_def empty_cnode_def cong: conj_cong) + apply (rule_tac x="the_nat_to_bl_10 0" in exI) + apply simp + done + + +text \notification between Low and High\ + +definition ntfn :: kernel_object where + "ntfn \ Notification \ntfn_obj = WaitingNtfn [High_tcb_ptr], ntfn_bound_tcb=None\" + + +text \global page table is mapped into the top-level page tables of each vspace\ + +abbreviation init_global_pt' where + "init_global_pt' \ (\idx. if idx \ kernel_mapping_slots then global_pte idx else InvalidPTE)" + + +text \Low's VSpace (PageDirectory)\ + +abbreviation ppn_from_addr :: "paddr \ pte_ppn" where + "ppn_from_addr addr \ ucast (addr >> pt_bits)" + +abbreviation Low_pt' :: pt where + "Low_pt' \ + (\_. 
InvalidPTE) + (0 := PagePTE (ppn_from_addr shared_page_ptr_phys) {} vm_read_write)" + +definition Low_pt :: kernel_object where + "Low_pt \ ArchObj (PageTable Low_pt')" + +abbreviation Low_pd' :: pt where + "Low_pd' \ + init_global_pt' + (0 := PageTablePTE (ppn_from_addr (addrFromPPtr Low_pt_ptr)) {})" + +definition Low_pd :: kernel_object where + "Low_pd \ ArchObj (PageTable Low_pd')" + + +text \High's VSpace (PageDirectory)\ + +abbreviation High_pt' :: pt where + "High_pt' \ + (\_. InvalidPTE) + (0 := PagePTE (ppn_from_addr shared_page_ptr_phys) {} vm_read_only)" + +definition High_pt :: kernel_object where + "High_pt \ ArchObj (PageTable High_pt')" + +abbreviation High_pd' :: pt where + "High_pd' \ + init_global_pt' + (0 := PageTablePTE (ppn_from_addr (addrFromPPtr High_pt_ptr)) {})" + +definition High_pd :: kernel_object where + "High_pd \ ArchObj (PageTable High_pd')" + + +text \Low's tcb\ + +definition Low_tcb :: kernel_object where + "Low_tcb \ TCB \tcb_ctable = CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2), + tcb_vtable = ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0))), + tcb_reply = ReplyCap Low_tcb_ptr True {AllowGrant, AllowWrite}, + tcb_caller = NullCap, + tcb_ipcframe = NullCap, + tcb_state = Running, + tcb_fault_handler = replicate word_bits False, + tcb_ipc_buffer = 0, + tcb_fault = None, + tcb_bound_notification = None, + tcb_mcpriority = Low_mcp, + tcb_priority = Low_prio, + tcb_time_slice = Low_time_slice, + tcb_domain = Low_domain, + tcb_flags = {}, + tcb_arch = \tcb_context = undefined\\" + + +text \High's tcb\ + +definition High_tcb :: kernel_object where + "High_tcb \ TCB \tcb_ctable = CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2) , + tcb_vtable = ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0))), + tcb_reply = ReplyCap High_tcb_ptr True {AllowGrant, AllowWrite}, + tcb_caller = NullCap, + tcb_ipcframe = NullCap, + tcb_state = BlockedOnNotification ntfn_ptr, + tcb_fault_handler = replicate word_bits False, + 
tcb_ipc_buffer = 0, + tcb_fault = None, + tcb_bound_notification = None, + tcb_mcpriority = High_mcp, + tcb_priority = High_prio, + tcb_time_slice = High_time_slice, + tcb_domain = High_domain, + tcb_flags = {}, + tcb_arch = \tcb_context = undefined\\" + + +text \idle's tcb\ + +definition idle_tcb :: kernel_object where + "idle_tcb \ TCB \tcb_ctable = NullCap, + tcb_vtable = NullCap, + tcb_reply = NullCap, + tcb_caller = NullCap, + tcb_ipcframe = NullCap, + tcb_state = IdleThreadState, + tcb_fault_handler = replicate word_bits False, + tcb_ipc_buffer = 0, + tcb_fault = None, + tcb_bound_notification = None, + tcb_mcpriority = default_priority, + tcb_priority = default_priority, + tcb_time_slice = timeSlice, + tcb_domain = default_domain, + tcb_flags = {}, + tcb_arch = \tcb_context = empty_context\\" + +definition + "irq_cnode \ CNode 0 (Map.empty([] \ cap.NullCap))" + +abbreviation + "Low_pool' \ \idx. if idx = asid_low_bits_of Low_asid then Some Low_pd_ptr else None" + +definition + "Low_pool \ ArchObj (ASIDPool Low_pool')" + +abbreviation + "High_pool' \ \idx. if idx = asid_low_bits_of High_asid then Some High_pd_ptr else None" + +definition + "High_pool \ ArchObj (ASIDPool High_pool')" + +definition + "shared_page \ ArchObj (DataPage False RISCVLargePage)" + +definition kh0 :: kheap where + "kh0 \ (\x. if \irq :: irq. 
init_irq_node_ptr + (ucast irq << 5) = x + then Some (CNode 0 (empty_cnode 0)) + else None) + (Low_cnode_ptr \ Low_cnode, + High_cnode_ptr \ High_cnode, + Low_pool_ptr \ Low_pool, + High_pool_ptr \ High_pool, + Silc_cnode_ptr \ Silc_cnode, + ntfn_ptr \ ntfn, + irq_cnode_ptr \ irq_cnode, + Low_pd_ptr \ Low_pd, + High_pd_ptr \ High_pd, + Low_pt_ptr \ Low_pt, + High_pt_ptr \ High_pt, + Low_tcb_ptr \ Low_tcb, + High_tcb_ptr \ High_tcb, + idle_tcb_ptr \ idle_tcb, + shared_page_ptr_virt \ shared_page, + arm_global_pt_ptr \ init_global_pt)" + +lemma irq_node_offs_min: + "init_irq_node_ptr \ init_irq_node_ptr + (ucast (irq :: irq) << 5)" + apply (rule_tac sz=59 in machine_word_plus_mono_right_split) + apply (simp add: unat_word_ariths mask_def shiftl_t2n s0_ptr_defs) + apply (cut_tac x=irq and 'a=64 in ucast_less) + apply simp + apply (simp add: word_less_nat_alt) + apply (simp add: word_bits_def) + done + +lemma irq_node_offs_max: + "init_irq_node_ptr + (ucast (irq:: irq) << 5) < init_irq_node_ptr + 0x7E1" + apply (simp add: s0_ptr_defs shiftl_t2n) + apply (cut_tac x=irq and 'a=64 in ucast_less) + apply simp + apply (simp add: word_less_nat_alt unat_word_ariths) + done + +definition irq_node_offs_range where + "irq_node_offs_range \ {x. init_irq_node_ptr \ x \ x < init_irq_node_ptr + 0x7E1} + \ {x. is_aligned x 5}" + +lemma irq_node_offs_in_range: + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ irq_node_offs_range" + apply (clarsimp simp: irq_node_offs_min irq_node_offs_max irq_node_offs_range_def) + apply (rule is_aligned_add[OF _ is_aligned_shift]) + apply (simp add: is_aligned_def s0_ptr_defs) + done + +lemma irq_node_offs_range_correct: + "x \ irq_node_offs_range + \ \irq. 
x = init_irq_node_ptr + (ucast (irq:: irq) << 5)" + apply (clarsimp simp: irq_node_offs_min irq_node_offs_max irq_node_offs_range_def s0_ptr_defs) + apply (rule_tac x="ucast ((x - 0xFFFFFFC000003000) >> 5)" in exI) + apply (clarsimp simp: ucast_ucast_mask) + apply (subst aligned_shiftr_mask_shiftl) + apply (rule aligned_sub_aligned) + apply assumption + apply (simp add: is_aligned_def) + apply simp + apply simp + apply (rule_tac n=11 in mask_eqI) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_def) + apply (simp add: mask_twice) + apply (simp add: diff_conv_add_uminus del: add_uminus_conv_diff) + apply (subst add.commute[symmetric]) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_def) + apply simp + apply (simp add: diff_conv_add_uminus del: add_uminus_conv_diff) + apply (subst add_mask_lower_bits) + apply (simp add: is_aligned_def) + apply clarsimp + apply (cut_tac x=x and y="0xFFFFFFC0000037E0" and n=14 in neg_mask_mono_le) + apply (force dest: word_less_sub_1) + apply (drule_tac n=11 in aligned_le_sharp) + apply (simp add: is_aligned_def) + apply (simp add: mask_def is_aligned_mask) + apply word_bitwise + apply fastforce + done + +lemma irq_node_offs_range_distinct[simp]: + "Low_cnode_ptr \ irq_node_offs_range" + "High_cnode_ptr \ irq_node_offs_range" + "Low_pool_ptr \ irq_node_offs_range" + "High_pool_ptr \ irq_node_offs_range" + "Silc_cnode_ptr \ irq_node_offs_range" + "ntfn_ptr \ irq_node_offs_range" + "irq_cnode_ptr \ irq_node_offs_range" + "Low_pd_ptr \ irq_node_offs_range" + "High_pd_ptr \ irq_node_offs_range" + "Low_pt_ptr \ irq_node_offs_range" + "High_pt_ptr \ irq_node_offs_range" + "Low_tcb_ptr \ irq_node_offs_range" + "High_tcb_ptr \ irq_node_offs_range" + "idle_tcb_ptr \ irq_node_offs_range" + "arm_global_pt_ptr \ irq_node_offs_range" + "shared_page_ptr_virt \ irq_node_offs_range" + by(simp add:irq_node_offs_range_def s0_ptr_defs)+ + +lemma irq_node_offs_distinct[simp]: + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ 
Low_cnode_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_cnode_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ Low_pool_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_pool_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ Silc_cnode_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ ntfn_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ irq_cnode_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ Low_pd_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_pd_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ Low_pt_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_pt_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ Low_tcb_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ High_tcb_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ idle_tcb_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ arm_global_pt_ptr" + "init_irq_node_ptr + (ucast (irq:: irq) << 5) \ shared_page_ptr_virt" + by (simp add:not_inD[symmetric, OF _ irq_node_offs_in_range])+ + +lemma kh0_dom: + "dom kh0 = {shared_page_ptr_virt, arm_global_pt_ptr, idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, + High_pt_ptr, Low_pt_ptr, High_pd_ptr, Low_pd_ptr, irq_cnode_ptr, ntfn_ptr, + Silc_cnode_ptr, High_pool_ptr, Low_pool_ptr, High_cnode_ptr, Low_cnode_ptr} + \ irq_node_offs_range" + apply (rule equalityI) + apply (simp add: kh0_def dom_def) + apply (clarsimp simp: irq_node_offs_in_range) + apply (clarsimp simp: dom_def) + apply (rule conjI, clarsimp simp: kh0_def)+ + apply (force simp: kh0_def dest: irq_node_offs_range_correct) + done + +lemmas kh0_SomeD' = set_mp[OF equalityD1[OF kh0_dom[simplified dom_def]], OF CollectI, simplified, OF exI] + +lemma kh0_SomeD: + "kh0 x = Some y \ + x = shared_page_ptr_virt \ y = shared_page \ + x = arm_global_pt_ptr \ y = init_global_pt \ + x = idle_tcb_ptr \ y = idle_tcb \ + x = High_tcb_ptr \ y = High_tcb \ + x = Low_tcb_ptr \ y = Low_tcb \ + x = High_pt_ptr \ y = High_pt \ + x = 
Low_pt_ptr \ y = Low_pt \ + x = High_pd_ptr \ y = High_pd \ + x = Low_pd_ptr \ y = Low_pd \ + x = irq_cnode_ptr \ y = irq_cnode \ + x = ntfn_ptr \ y = ntfn \ + x = Silc_cnode_ptr \ y = Silc_cnode \ + x = High_pool_ptr \ y = High_pool \ + x = Low_pool_ptr \ y = Low_pool \ + x = High_cnode_ptr \ y = High_cnode \ + x = Low_cnode_ptr \ y = Low_cnode \ + x \ irq_node_offs_range \ y = CNode 0 (empty_cnode 0)" + apply (frule kh0_SomeD') + apply (erule disjE, simp add: kh0_def | force simp: kh0_def split: if_split_asm)+ + done + +(* Collected unfolding rules for every object in the initial kernel heap kh0. *) +lemmas kh0_obj_def = + Low_cnode_def High_cnode_def Silc_cnode_def Low_pool_def High_pool_def Low_pd_def High_pd_def + Low_pt_def High_pt_def Low_tcb_def High_tcb_def idle_tcb_def irq_cnode_def ntfn_def + init_global_pt_def global_pte_def vm_kernel_only_def shared_page_def + + +(* Initial deterministic-extension state: empty cdt list, work units left undefined. *) +definition exst0 :: "det_ext" where + "exst0 \ \work_units_completed_internal = undefined, + cdt_list_internal = const []\" + +(* Initial machine state: only the timer IRQ is unmasked; memory reads as zero. *) +definition machine_state0 :: "machine_state" where + "machine_state0 \ \irq_masks = (\irq. if irq = timer_irq then False else True), + irq_state = 0, + underlying_memory = const 0, + device_state = Map.empty, + machine_state_rest = undefined\" + +(* Initial arch state: the ASID table maps the Low/High high-bits to their ASID pools, + and the kernel window at max_pt_level is backed by the global page table. *) +definition arch_state0 :: "arch_state" where + "arch_state0 \ \ + arm_asid_table = [asid_high_bits_of Low_asid \ Low_pool_ptr, + asid_high_bits_of High_asid \ High_pool_ptr], + arm_kernel_vspace = (\level. if level = max_pt_level then {arm_global_pt_ptr} else {}), + (* NOTE(review): a riscv_* field in an AARCH64 arch_state record looks like a leftover + from the RISCV64 version of this theory (arm_kernel_vspace is already assigned above); + confirm against the AARCH64 Arch_Structs_A record definition *) + riscv_kernel_vspace = init_vspace_uses + \" + +(* The complete initial abstract state of the example system: heap kh0, empty cdt, + Low as the current thread, two scheduling domains, and only the timer IRQ active. *) +definition s0_internal :: "det_ext state" where + "s0_internal \ \ + kheap = kh0, + cdt = Map.empty, + is_original_cap = (\_.
False) ((Low_tcb_ptr, tcb_cnode_index 2) := True, + (High_tcb_ptr, tcb_cnode_index 2) := True), + cur_thread = Low_tcb_ptr, + idle_thread = idle_tcb_ptr, + scheduler_action = resume_cur_thread, + domain_list = [(0, 10), (1, 10)], + domain_index = 0, + cur_domain = 0, + domain_time = 5, + ready_queues = (const (const [])), + machine_state = machine_state0, + interrupt_irq_node = (\irq. init_irq_node_ptr + (ucast irq << 5)), + interrupt_states = (\_. irq_state.IRQInactive) (timer_irq := irq_state.IRQTimer), + arch_state = arch_state0, + exst = exst0 + \" + +lemma kh_s0_def: + "(kheap s0_internal x = Some y) = ( + x = shared_page_ptr_virt \ y = shared_page \ + x = arm_global_pt_ptr \ y = init_global_pt \ + x = idle_tcb_ptr \ y = idle_tcb \ + x = High_tcb_ptr \ y = High_tcb \ + x = Low_tcb_ptr \ y = Low_tcb \ + x = High_pt_ptr \ y = High_pt \ + x = Low_pt_ptr \ y = Low_pt \ + x = High_pd_ptr \ y = High_pd \ + x = Low_pd_ptr \ y = Low_pd \ + x = irq_cnode_ptr \ y = irq_cnode \ + x = ntfn_ptr \ y = ntfn \ + x = Silc_cnode_ptr \ y = Silc_cnode \ + x = High_pool_ptr \ y = High_pool \ + x = Low_pool_ptr \ y = Low_pool \ + x = High_cnode_ptr \ y = High_cnode \ + x = Low_cnode_ptr \ y = Low_cnode \ + x \ irq_node_offs_range \ y = CNode 0 (empty_cnode 0))" + apply (clarsimp simp: s0_internal_def kh0_def) + apply (auto simp: irq_node_offs_in_range dest: irq_node_offs_range_correct) + done + + +subsubsection \Defining the policy graph\ + +definition Sys1AgentMap :: "(auth_graph_label subject_label) agent_map" where + "Sys1AgentMap \ + \ \set the range of the shared_page to Low, default everything else to IRQ0\ + (\p. 
(* NOTE(review): RISCVLargePage in an AARCH64 theory looks clagged from the RISCV64 port; + the AARCH64 vmpage_size constructor is presumably ARMLargePage -- it is used consistently + throughout this file, so any rename must be done file-wide, not here. Confirm. *) if p \ ptr_range shared_page_ptr_virt (pageBitsForSize RISCVLargePage) + then partition_label Low + else partition_label IRQ0) + (Low_cnode_ptr := partition_label Low, + High_cnode_ptr := partition_label High, + Low_pool_ptr := partition_label Low, + High_pool_ptr := partition_label High, + ntfn_ptr := partition_label High, + irq_cnode_ptr := partition_label IRQ0, + Silc_cnode_ptr := SilcLabel, + Low_pd_ptr := partition_label Low, + High_pd_ptr := partition_label High, + Low_pt_ptr := partition_label Low, + High_pt_ptr := partition_label High, + Low_tcb_ptr := partition_label Low, + High_tcb_ptr := partition_label High, + idle_tcb_ptr := partition_label Low)" + +(* Pointwise characterisation of Sys1AgentMap on every named object, for use as simp rules. *) +lemma Sys1AgentMap_simps: + "Sys1AgentMap Low_cnode_ptr = partition_label Low" + "Sys1AgentMap High_cnode_ptr = partition_label High" + "Sys1AgentMap Low_pool_ptr = partition_label Low" + "Sys1AgentMap High_pool_ptr = partition_label High" + "Sys1AgentMap ntfn_ptr = partition_label High" + "Sys1AgentMap irq_cnode_ptr = partition_label IRQ0" + "Sys1AgentMap Silc_cnode_ptr = SilcLabel" + "Sys1AgentMap Low_pd_ptr = partition_label Low" + "Sys1AgentMap High_pd_ptr = partition_label High" + "Sys1AgentMap Low_pt_ptr = partition_label Low" + "Sys1AgentMap High_pt_ptr = partition_label High" + "Sys1AgentMap Low_tcb_ptr = partition_label Low" + "Sys1AgentMap High_tcb_ptr = partition_label High" + "Sys1AgentMap idle_tcb_ptr = partition_label Low" + "\p. p \ ptr_range shared_page_ptr_virt (pageBitsForSize RISCVLargePage) + \ Sys1AgentMap p = partition_label Low" + unfolding Sys1AgentMap_def + apply simp_all + by (auto simp: s0_ptr_defs ptr_range_def) + +(* Labelling of ASIDs: the Low/High/Silc ASID high-bit ranges map to their partitions. *) +definition Sys1ASIDMap :: "(auth_graph_label subject_label) agent_asid_map" where + "Sys1ASIDMap \ + (\x.
if asid_high_bits_of x = asid_high_bits_of Low_asid + then partition_label Low + else if asid_high_bits_of x = asid_high_bits_of High_asid + then partition_label High + else if asid_high_bits_of x = asid_high_bits_of Silc_asid + then SilcLabel + else undefined)" + +(* We include two domains: Low is associated with domain 0 and High with domain 1; + all remaining domains default to High. *) + +(* The access-control policy for the example system: Low is the current subject; it may + activate and edit ready queues but may not send IRQs, and all IRQs are labelled IRQ0. *) +definition Sys1PAS :: "(auth_graph_label subject_label) PAS" where + "Sys1PAS \ + \pasObjectAbs = Sys1AgentMap, + pasASIDAbs = Sys1ASIDMap, + pasIRQAbs = (\_. partition_label IRQ0), + pasPolicy = Sys1AuthGraph, + pasSubject = partition_label Low, + pasMayActivate = True, + pasMayEditReadyQueues = True, pasMaySendIrqs = False, + pasDomainAbs = ((\_. {partition_label High})(0 := {partition_label Low}))\" + + +subsubsection \Proof of pas_refined for Sys1\ + +lemma High_caps_well_formed: "well_formed_cnode_n 10 High_caps" + by (auto simp: High_caps_def well_formed_cnode_n_def split: if_split_asm) + +lemma Low_caps_well_formed: "well_formed_cnode_n 10 Low_caps" + by (auto simp: Low_caps_def well_formed_cnode_n_def split: if_split_asm) + +lemma Silc_caps_well_formed: "well_formed_cnode_n 10 Silc_caps" + by (auto simp: Silc_caps_def well_formed_cnode_n_def split: if_split_asm) + +(* Exhaustive enumeration of every capability reachable in the initial state s0_internal. *) +lemma s0_caps_of_state : + "caps_of_state s0_internal p = Some cap \ + cap = NullCap \ + (p,cap) \ + { ((Low_cnode_ptr,(the_nat_to_bl_10 1)), ThreadCap Low_tcb_ptr), + ((Low_cnode_ptr,(the_nat_to_bl_10 2)), CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)), + ((Low_cnode_ptr,(the_nat_to_bl_10 3)), ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0)))), + ((Low_cnode_ptr,(the_nat_to_bl_10 6)), ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0)))), + ((Low_cnode_ptr,(the_nat_to_bl_10 4)), ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid)), + ((Low_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid, 0)))), +
((Low_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowSend}), + ((High_cnode_ptr,(the_nat_to_bl_10 1)), ThreadCap High_tcb_ptr), + ((High_cnode_ptr,(the_nat_to_bl_10 2)), CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)), + ((High_cnode_ptr,(the_nat_to_bl_10 3)), ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0)))), + ((High_cnode_ptr,(the_nat_to_bl_10 6)), ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0)))), + ((High_cnode_ptr,(the_nat_to_bl_10 4)), ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid)), + ((High_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (High_asid, 0)))), + ((High_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowRecv}) , + ((Silc_cnode_ptr,(the_nat_to_bl_10 2)), CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2)), + ((Silc_cnode_ptr,(the_nat_to_bl_10 5)), ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid, 0)))), + ((Silc_cnode_ptr,(the_nat_to_bl_10 318)), NotificationCap ntfn_ptr 0 {AllowSend}), + ((Low_tcb_ptr,(tcb_cnode_index 0)), CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)), + ((Low_tcb_ptr,(tcb_cnode_index 1)), ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0)))), + ((Low_tcb_ptr,(tcb_cnode_index 2)), ReplyCap Low_tcb_ptr True {AllowGrant, AllowWrite}), + ((Low_tcb_ptr,(tcb_cnode_index 3)), NullCap), + ((Low_tcb_ptr,(tcb_cnode_index 4)), NullCap), + ((High_tcb_ptr,(tcb_cnode_index 0)), CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)), + ((High_tcb_ptr,(tcb_cnode_index 1)), ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0)))), + ((High_tcb_ptr,(tcb_cnode_index 2)), ReplyCap High_tcb_ptr True {AllowGrant, AllowWrite}), + ((High_tcb_ptr,(tcb_cnode_index 3)), NullCap), + ((High_tcb_ptr,(tcb_cnode_index 4)), NullCap)} " + supply if_cong[cong] + apply (insert High_caps_well_formed) + apply (insert Low_caps_well_formed) + apply (insert 
Silc_caps_well_formed) + apply (simp add: caps_of_state_cte_wp_at cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def) + apply (case_tac p, clarsimp) + apply (clarsimp split: if_splits) + apply (clarsimp simp: cte_wp_at_cases tcb_cap_cases_def split: if_split_asm)+ + apply (clarsimp simp: Silc_caps_def split: if_splits) + apply (clarsimp simp: High_caps_def split: if_splits) + apply (clarsimp simp: Low_caps_def split: if_splits) + done + +lemma tcb_states_of_state_s0: + "tcb_states_of_state s0_internal = [High_tcb_ptr \ thread_state.BlockedOnNotification ntfn_ptr, + Low_tcb_ptr \ thread_state.Running, + idle_tcb_ptr \ thread_state.IdleThreadState ]" + unfolding s0_internal_def tcb_states_of_state_def + by (auto simp: get_tcb_def kh0_def kh0_obj_def) + +lemma thread_bounds_of_state_s0: + "thread_bound_ntfns s0_internal = Map.empty" + unfolding s0_internal_def thread_bound_ntfns_def + by (auto simp: get_tcb_def kh0_def kh0_obj_def) + +lemma Sys1_wellformed': + "policy_wellformed (pasPolicy Sys1PAS) False irqs x" + by (clarsimp simp: Sys1PAS_def Sys1AgentMap_simps Sys1AuthGraph_def policy_wellformed_def) + +corollary Sys1_wellformed: + "x \ range (pasObjectAbs Sys1PAS) \ \(range (pasDomainAbs Sys1PAS)) - {SilcLabel} + \ policy_wellformed (pasPolicy Sys1PAS) False irqs x" + by (rule Sys1_wellformed') + +lemma Sys1_pas_wellformed: + "pas_wellformed Sys1PAS" + by (clarsimp simp: Sys1PAS_def Sys1AgentMap_simps Sys1AuthGraph_def policy_wellformed_def) + +lemma domains_of_state_s0[simp]: + "domains_of_state s0_internal = {(High_tcb_ptr, High_domain), + (Low_tcb_ptr, Low_domain), + (idle_tcb_ptr, default_domain)}" + apply (rule equalityI) + apply (rule subsetI) + apply clarsimp + apply (erule domains_of_state_aux.cases) + apply (clarsimp simp: s0_internal_def etcbs_of'_def kh0_def kh0_obj_def split: if_split_asm) + apply (force simp: s0_internal_def etcbs_of'_def kh0_def kh0_obj_def intro: domains_of_state_aux.domtcbs)+ + done + +lemma pool_for_asid_s0: + "pool_for_asid asid 
s0_internal = (if asid_high_bits_of asid = asid_high_bits_of High_asid + then Some High_pool_ptr + else if asid_high_bits_of asid = asid_high_bits_of Low_asid + then Some Low_pool_ptr + else None)" + by (clarsimp simp: pool_for_asid_def s0_internal_def arch_state0_def) + +lemma asid_pools_of_s0: + "asid_pools_of s0_internal = [Low_pool_ptr \ Low_pool', High_pool_ptr \ High_pool']" + by (auto simp: asid_pools_of_ko_at obj_at_def s0_internal_def opt_map_def kh0_def kh0_obj_def + split: option.splits) + +lemma pts_of_s0: + "pts_of s0_internal = [Low_pd_ptr \ Low_pd', + High_pd_ptr \ High_pd', + Low_pt_ptr \ Low_pt', + High_pt_ptr \ High_pt', + arm_global_pt_ptr \ init_global_pt']" + by (auto simp: opt_map_def s0_internal_def kh0_def kh0_obj_def + split: option.splits if_splits)+ + + +lemma ptes_of_s0_PageTablePTE: + "\ ptes_of s0_internal ptr = Some pte; is_PageTablePTE pte \ + \ table_base ptr = Low_pd_ptr \ pte = PageTablePTE (ppn_from_addr (addrFromPPtr Low_pt_ptr)) {} + \ table_base ptr = High_pd_ptr \ pte = PageTablePTE (ppn_from_addr (addrFromPPtr High_pt_ptr)) {}" + by (auto simp: ptes_of_def pts_of_s0 obind_def kh0_obj_def split: option.splits if_splits) + +lemma Low_pt_is_aligned[simp]: + "is_aligned Low_pt_ptr pt_bits" + by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) + +lemma High_pt_is_aligned[simp]: + "is_aligned High_pt_ptr pt_bits" + by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) + +lemma Low_pd_is_aligned[simp]: + "is_aligned Low_pd_ptr pt_bits" + by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) + +lemma High_pd_is_aligned[simp]: + "is_aligned High_pd_ptr pt_bits" + by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) + +lemma 
shared_page_ptr_is_aligned[simp]: + "is_aligned shared_page_ptr_virt pt_bits" + by (clarsimp simp: s0_ptr_defs pt_bits_def table_size_def ptTranslationBits_def pte_bits_def word_size_bits_def is_aligned_def) + +lemma vs_lookup_s0_SomeD: + "vs_lookup_table lvl asid vref s0_internal = Some (lvl', p) + \ (asid_high_bits_of asid = asid_high_bits_of High_asid \ lvl' = asid_pool_level \ p = High_pool_ptr + \ asid_high_bits_of asid = asid_high_bits_of Low_asid \ lvl' = asid_pool_level \ p = Low_pool_ptr + \ asid = High_asid \ lvl' = max_pt_level \ p = High_pd_ptr + \ asid = Low_asid \ lvl' = max_pt_level \ p = Low_pd_ptr + \ asid = High_asid \ lvl' = max_pt_level - 1 \ p = High_pt_ptr + \ asid = Low_asid \ lvl' = max_pt_level - 1 \ p = Low_pt_ptr)" + apply (clarsimp simp: vs_lookup_table_def obind_def split: option.splits if_splits) + apply (clarsimp simp: pool_for_asid_s0 split: if_splits) + apply (case_tac "lvl = max_pt_level") + apply (clarsimp simp: asid_pools_of_s0 pool_for_asid_s0 asid_high_low vspace_for_pool_def + split: if_splits) + apply (case_tac "lvl = max_pt_level - 1") + apply (clarsimp simp: pt_walk.simps split: if_splits) + apply (drule (1) ptes_of_s0_PageTablePTE) + apply (auto simp: pptr_from_pte_def ptrFromPAddr_addr_from_ppn' ptes_of_def asid_high_low + kh0_obj_def pts_of_s0 pool_for_asid_s0 asid_pools_of_s0 vspace_for_pool_def + split: if_splits)[2] + apply (clarsimp simp: pt_walk.simps) + apply (clarsimp split: if_splits) + apply (drule (1) ptes_of_s0_PageTablePTE) + apply (erule disjE; clarsimp) + by (clarsimp simp: pptr_from_pte_def ptrFromPAddr_addr_from_ppn' kh0_obj_def + pt_walk.simps ptes_of_def pts_of_s0 asid_high_low + pool_for_asid_s0 asid_pools_of_s0 vspace_for_pool_def + split: if_splits)+ + +lemma pt_bits_left_max_minus_1_pageBitsForSize: + "pt_bits_left (max_pt_level - 1) = pageBitsForSize RISCVLargePage" + apply (clarsimp simp: pt_bits_left_def max_pt_level_def2) + done + +lemma Sys1_pas_refined: + "pas_refined Sys1PAS s0_internal" + 
apply (clarsimp simp: pas_refined_def) + apply (intro conjI) + apply (simp add: Sys1_pas_wellformed) + apply (clarsimp simp: irq_map_wellformed_aux_def s0_internal_def Sys1AgentMap_def Sys1PAS_def) + apply (clarsimp simp: s0_ptr_defs ptr_range_def) + apply word_bitwise + apply (clarsimp simp: tcb_domain_map_wellformed_aux_def minBound_word High_domain_def Low_domain_def + Sys1PAS_def Sys1AgentMap_def default_domain_def) + apply (clarsimp simp: auth_graph_map_def Sys1PAS_def state_objs_to_policy_def state_bits_to_policy_def) + apply (erule state_bits_to_policyp.cases; clarsimp) + apply (drule s0_caps_of_state, clarsimp) + apply (simp add: Sys1AuthGraph_def) + apply (elim disjE; clarsimp simp: Sys1AgentMap_simps cap_auth_conferred_def ptr_range_def + arch_cap_auth_conferred_def vspace_cap_rights_to_auth_def + vm_read_write_def vm_read_only_def cap_rights_to_auth_def) + apply (drule s0_caps_of_state, clarsimp) + apply (elim disjE, simp_all)[1] + apply (clarsimp simp: state_refs_of_def thread_st_auth_def tcb_states_of_state_s0 + Sys1AuthGraph_def Sys1AgentMap_simps split: if_splits) + apply (clarsimp simp: state_refs_of_def thread_st_auth_def thread_bounds_of_state_s0) + apply (simp add: s0_internal_def) (* this is OK because cdt is empty..*) + apply (simp add: s0_internal_def) (* this is OK because cdt is empty..*) + apply (clarsimp simp: state_vrefs_def) + apply (drule vs_lookup_s0_SomeD) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: s0_internal_def kh0_obj_def opt_map_def vs_refs_aux_def + vm_read_only_def vspace_cap_rights_to_auth_def pte_ref2_def + Sys1AuthGraph_def Sys1AgentMap_simps graph_of_def ptrFromPAddr_addr_from_ppn' + shared_page_ptr_phys_def pt_bits_left_max_minus_1_pageBitsForSize + dest!: kh0_SomeD split: option.splits if_splits)+)[6] + apply (rule subsetI, clarsimp) + apply (erule state_asids_to_policy_aux.cases) + apply (drule s0_caps_of_state, clarsimp) + apply (fastforce simp: Sys1AuthGraph_def Sys1PAS_def Sys1ASIDMap_def Sys1AgentMap_def 
+ Low_asid_def High_asid_def Silc_asid_def + asid_low_bits_def asid_high_bits_of_def) + apply (clarsimp simp: state_vrefs_def) + apply (drule vs_lookup_s0_SomeD) + apply (clarsimp simp: vs_refs_aux_def s0_internal_def arch_state0_def kh0_def kh0_obj_def + Sys1PAS_def Sys1ASIDMap_def Sys1AgentMap_simps Sys1AuthGraph_def + opt_map_def graph_of_def split: if_splits) + apply (clarsimp simp: Sys1PAS_def Sys1ASIDMap_def Sys1AgentMap_simps Sys1AuthGraph_def + s0_internal_def arch_state0_def split: if_splits) + apply (fastforce elim: state_irqs_to_policy_aux.cases dest: s0_caps_of_state) + done + +lemma Sys1_pas_cur_domain: + "pas_cur_domain Sys1PAS s0_internal" + by (simp add: s0_internal_def exst0_def Sys1PAS_def) + +lemma Sys1_current_subject_idemp: + "Sys1PAS\pasSubject := the_elem (pasDomainAbs Sys1PAS (cur_domain s0_internal))\ = Sys1PAS" + by (simp add: Sys1PAS_def s0_internal_def exst0_def) + +lemma pasMaySendIrqs_Sys1PAS[simp]: + "pasMaySendIrqs Sys1PAS = False" + by(auto simp: Sys1PAS_def) + +lemma Sys1_pas_domains_distinct: + "pas_domains_distinct Sys1PAS" + by (clarsimp simp: Sys1PAS_def pas_domains_distinct_def) + +lemma Sys1_pas_wellformed_noninterference: + "pas_wellformed_noninterference Sys1PAS" + apply (simp add: pas_wellformed_noninterference_def) + apply (intro conjI ballI allI) + apply (blast intro: Sys1_wellformed) + apply (clarsimp simp: Sys1PAS_def policy_wellformed_def Sys1AuthGraph_def) + apply (rule Sys1_pas_domains_distinct) + done + +lemma Sys1AgentMap_shared_page_ptr: + "Sys1AgentMap shared_page_ptr_virt = partition_label Low" + by (clarsimp simp: Sys1AgentMap_def s0_ptr_defs ptr_range_def bit_simps) + +lemma silc_inv_s0: + "silc_inv Sys1PAS s0_internal s0_internal" + apply (clarsimp simp: silc_inv_def) + apply (rule conjI, simp add: Sys1PAS_def) + apply (rule conjI) + apply (clarsimp simp: Sys1PAS_def Sys1AgentMap_def s0_internal_def kh0_def obj_at_def kh0_obj_def + is_cap_table_def Silc_caps_well_formed split: if_split_asm) + apply (rule 
conjI) + apply (clarsimp simp: Sys1PAS_def Sys1AuthGraph_def) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=Silc_cnode_ptr in exI) + apply (rule conjI) + apply (subgoal_tac "(Silc_cnode_ptr,the_nat_to_bl_10 318) \ slots_holding_overlapping_caps cap s0_internal + \ (Silc_cnode_ptr, the_nat_to_bl_10 5) \ slots_holding_overlapping_caps cap s0_internal") + apply fastforce + apply clarsimp + apply (clarsimp simp: slots_holding_overlapping_caps_def2) + apply (case_tac "cap = NullCap") + apply clarsimp + apply (simp add: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def) + apply (case_tac a, clarsimp) + apply (clarsimp split: if_splits) + apply ((clarsimp simp: intra_label_cap_def cte_wp_at_cases tcb_cap_cases_def + cap_points_to_label_def split: if_split_asm)+)[8] + apply (clarsimp simp: intra_label_cap_def cap_points_to_label_def) + apply (drule cte_wp_at_caps_of_state' s0_caps_of_state)+ + apply ((erule disjE | + clarsimp simp: Sys1PAS_def Sys1AgentMap_simps + the_nat_to_bl_def nat_to_bl_def ctes_wp_at_def cte_wp_at_cases + s0_internal_def kh0_def kh0_obj_def Silc_caps_well_formed obj_refs_def + | simp add: Silc_caps_def)+)[1] + apply (clarsimp simp: Sys1PAS_def Sys1AgentMap_def) + apply (intro conjI) + apply (clarsimp simp: all_children_def s0_internal_def silc_dom_equiv_def equiv_for_refl) + apply (clarsimp simp: all_children_def s0_internal_def silc_dom_equiv_def equiv_for_refl) + apply (clarsimp simp: Invariants_AI.cte_wp_at_caps_of_state ) + by (auto simp:is_transferable.simps dest:s0_caps_of_state) + + +lemma only_timer_irq_s0: + "only_timer_irq timer_irq s0_internal" + apply (clarsimp simp: only_timer_irq_def s0_internal_def irq_is_recurring_def is_irq_at_def + irq_at_def Let_def irq_oracle_def machine_state0_def timer_irq_def) + apply presburger + done + +lemma domain_sep_inv_s0: + "domain_sep_inv False s0_internal s0_internal" + apply (clarsimp simp: domain_sep_inv_def) + apply (force dest: cte_wp_at_caps_of_state' s0_caps_of_state + | rule conjI 
allI | clarsimp simp: s0_internal_def)+ + done + +lemma only_timer_irq_inv_s0: + "only_timer_irq_inv timer_irq s0_internal s0_internal" + by (simp add: only_timer_irq_inv_def only_timer_irq_s0 domain_sep_inv_s0) + +lemma Sys1_guarded_pas_domain: + "guarded_pas_domain Sys1PAS s0_internal" + by (clarsimp simp: guarded_pas_domain_def Sys1PAS_def s0_internal_def exst0_def Sys1AgentMap_simps) + +lemma s0_valid_domain_list: + "valid_domain_list s0_internal" + by (clarsimp simp: valid_domain_list_2_def s0_internal_def exst0_def) + +definition + "s0 \ ((if ct_idle s0_internal then idle_context s0_internal else s0_context,s0_internal),KernelExit)" + + +subsubsection \einvs\ + +lemma well_formed_cnode_n_s0_caps[simp]: + "well_formed_cnode_n 10 High_caps" + "well_formed_cnode_n 10 Low_caps" + "well_formed_cnode_n 10 Silc_caps" + "\ well_formed_cnode_n 10 [[] \ NullCap]" + apply (simp add: High_caps_well_formed Low_caps_well_formed Silc_caps_well_formed)+ + apply (fastforce simp: well_formed_cnode_n_def dest: eqset_imp_iff[where x="[]"]) + done + +lemma valid_caps_s0[simp]: + "s0_internal \ ThreadCap Low_tcb_ptr" + "s0_internal \ ThreadCap High_tcb_ptr" + "s0_internal \ CNodeCap Low_cnode_ptr 10 (the_nat_to_bl_10 2)" + "s0_internal \ CNodeCap High_cnode_ptr 10 (the_nat_to_bl_10 2)" + "s0_internal \ CNodeCap Silc_cnode_ptr 10 (the_nat_to_bl_10 2)" + "s0_internal \ ArchObjectCap (ASIDPoolCap Low_pool_ptr Low_asid)" + "s0_internal \ ArchObjectCap (ASIDPoolCap High_pool_ptr High_asid)" + "s0_internal \ ArchObjectCap (PageTableCap Low_pd_ptr (Some (Low_asid,0)))" + "s0_internal \ ArchObjectCap (PageTableCap High_pd_ptr (Some (High_asid,0)))" + "s0_internal \ ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0)))" + "s0_internal \ ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_write RISCVLargePage False (Some (Low_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt 
vm_read_only RISCVLargePage False (Some (High_asid,0)))" + "s0_internal \ ArchObjectCap (FrameCap shared_page_ptr_virt vm_read_only RISCVLargePage False (Some (Silc_asid,0)))" + "s0_internal \ NotificationCap ntfn_ptr 0 {AllowWrite}" + "s0_internal \ NotificationCap ntfn_ptr 0 {AllowRead}" + "s0_internal \ ReplyCap Low_tcb_ptr True {AllowGrant,AllowWrite}" + "s0_internal \ ReplyCap High_tcb_ptr True {AllowGrant,AllowWrite}" + by (auto simp: s0_internal_def s0_ptr_defs kh0_def kh0_obj_def bit_simps word_bits_def + valid_cap_def cap_aligned_def is_aligned_def obj_at_def cte_level_bits_def + is_ntfn_def is_tcb_def is_cap_table_def a_type_def the_nat_to_bl_def nat_to_bl_def + Low_asid_def High_asid_def Silc_asid_def asid_low_bits_def asid_bits_def + wellformed_mapdata_def valid_vm_rights_def vmsz_aligned_def) + +lemma valid_obj_s0[simp]: + "valid_obj Low_cnode_ptr Low_cnode s0_internal" + "valid_obj High_cnode_ptr High_cnode s0_internal" + "valid_obj High_pool_ptr High_pool s0_internal" + "valid_obj Low_pool_ptr Low_pool s0_internal" + "valid_obj Silc_cnode_ptr Silc_cnode s0_internal" + "valid_obj ntfn_ptr ntfn s0_internal" + "valid_obj irq_cnode_ptr irq_cnode s0_internal" + "valid_obj Low_pd_ptr Low_pd s0_internal" + "valid_obj High_pd_ptr High_pd s0_internal" + "valid_obj Low_pt_ptr Low_pt s0_internal" + "valid_obj High_pt_ptr High_pt s0_internal" + "valid_obj Low_tcb_ptr Low_tcb s0_internal" + "valid_obj High_tcb_ptr High_tcb s0_internal" + "valid_obj idle_tcb_ptr idle_tcb s0_internal" + "valid_obj arm_global_pt_ptr init_global_pt s0_internal" + "valid_obj shared_page_ptr_virt shared_page s0_internal" + apply (simp_all add: valid_obj_def kh0_obj_def) + apply (simp add: valid_cs_def Low_caps_ran High_caps_ran Silc_caps_ran + valid_cs_size_def word_bits_def cte_level_bits_def)+ + apply (simp add: valid_ntfn_def obj_at_def s0_internal_def kh0_def High_tcb_def is_tcb_def) + apply (simp add: valid_cs_def valid_cs_size_def word_bits_def + cte_level_bits_def 
well_formed_cnode_n_def) + apply (clarsimp simp: valid_tcb_def tcb_cap_cases_def valid_tcb_state_def valid_arch_tcb_def + is_valid_vtable_root_def is_master_reply_cap_def is_ntfn_def obj_at_def + wellformed_pte_def valid_vm_rights_def vm_kernel_only_def + | fastforce simp: s0_internal_def kh0_def kh0_obj_def)+ + done + +lemma valid_objs_s0: + "valid_objs s0_internal" + apply (clarsimp simp: valid_objs_def) + apply (subst (asm) s0_internal_def, clarsimp) + apply (drule kh0_SomeD) + apply (elim disjE; clarsimp) + apply (fastforce simp: valid_obj_def valid_cs_def valid_cs_size_def + cte_level_bits_def word_bits_def well_formed_cnode_n_def) + done + +lemma pspace_aligned_s0: + "pspace_aligned s0_internal" + apply (clarsimp simp: pspace_aligned_def s0_internal_def) + apply (drule kh0_SomeD) + apply (auto simp: cte_level_bits_def irq_node_offs_range_def + is_aligned_def s0_ptr_defs kh0_obj_def bit_simps) + done + +lemma pspace_distinct_s0: + "pspace_distinct s0_internal" + apply (clarsimp simp: pspace_distinct_def s0_internal_def) + apply (drule kh0_SomeD)+ + apply (case_tac "x \ irq_node_offs_range \ y \ irq_node_offs_range") + apply clarsimp + apply (drule irq_node_offs_range_correct)+ + apply clarsimp + apply (clarsimp simp: s0_ptr_defs cte_level_bits_def) + apply word_bitwise + apply auto[1] + apply (elim disjE) + (* slow *) + by (simp | clarsimp simp: kh0_obj_def cte_level_bits_def s0_ptr_defs pte_bits_def bit_simps + | fastforce + | clarsimp simp: irq_node_offs_range_def s0_ptr_defs, + drule_tac x="0x1F" in word_plus_strict_mono_right, simp, simp add: add.commute, + drule(1) notE[rotated, OF less_trans, OF _ _ leD, rotated 2] + | drule(1) notE[rotated, OF le_less_trans, OF _ _ leD, rotated 2], simp, assumption)+ + +lemma valid_pspace_s0[simp]: + "valid_pspace s0_internal" + apply (simp add: valid_pspace_def pspace_distinct_s0 pspace_aligned_s0 valid_objs_s0) + apply (rule conjI) + apply (clarsimp simp: if_live_then_nonz_cap_def) + apply (subst (asm) 
s0_internal_def) + apply (clarsimp simp: ex_nonz_cap_to_def live_def arch_tcb_live_def + hyp_live_def obj_at_def kh0_def kh0_obj_def + split: if_splits) + apply (rule_tac x="High_cnode_ptr" in exI) + apply (rule_tac x="the_nat_to_bl_10 1" in exI) + apply (force simp: s0_internal_def kh0_def kh0_obj_def High_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + apply (rule_tac x="Low_cnode_ptr" in exI) + apply (rule_tac x="the_nat_to_bl_10 1" in exI) + apply (force simp: s0_internal_def kh0_def kh0_obj_def Low_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + apply (rule_tac x="High_cnode_ptr" in exI) + apply (rule_tac x="the_nat_to_bl_10 318" in exI) + apply (force simp: s0_internal_def kh0_def kh0_obj_def High_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + apply (intro conjI) + apply (force dest: s0_caps_of_state simp: cte_wp_at_caps_of_state zombies_final_def is_zombie_def) + apply (clarsimp simp: sym_refs_def state_refs_of_def state_hyp_refs_of_def + refs_of_def s0_internal_def kh0_def kh0_obj_def) + apply (clarsimp simp: sym_refs_def state_hyp_refs_of_def s0_internal_def kh0_def) + done + +lemma descendants_s0[simp]: + "descendants_of (a, b) (cdt s0_internal) = {}" + apply (rule set_eqI) + apply clarsimp + apply (drule descendants_of_NoneD[rotated]) + apply (simp add: s0_internal_def)+ + done + +lemma valid_mdb_s0[simp]: + "valid_mdb s0_internal" + apply (simp add: valid_mdb_def reply_mdb_def) + apply (intro conjI) + apply (clarsimp simp: mdb_cte_at_def s0_internal_def) + apply (force dest: s0_caps_of_state simp: untyped_mdb_def) + apply (clarsimp simp: descendants_inc_def) + apply (clarsimp simp: no_mloop_def s0_internal_def cdt_parent_defs) + apply (clarsimp simp: untyped_inc_def) + apply (drule s0_caps_of_state)+ + apply ((simp | erule disjE)+)[1] + apply (force dest: s0_caps_of_state simp: ut_revocable_def) + apply (force dest: s0_caps_of_state simp: irq_revocable_def) + apply (clarsimp simp: reply_master_revocable_def) + apply (drule 
s0_caps_of_state) + apply ((simp add: is_master_reply_cap_def s0_internal_def s0_ptr_defs | erule disjE)+)[1] + apply (force dest: s0_caps_of_state simp: reply_caps_mdb_def) + apply (clarsimp simp: reply_masters_mdb_def) + apply (simp add: s0_internal_def) + apply (clarsimp simp: valid_arch_mdb_def) + done + +lemma valid_ioc_s0[simp]: + "valid_ioc s0_internal" + by (clarsimp simp: cte_wp_at_cases valid_ioc_def s0_internal_def kh0_def kh0_obj_def) + +lemma valid_idle_s0[simp]: + "valid_idle s0_internal" + by (clarsimp simp: valid_idle_def valid_arch_idle_def pred_tcb_at_def obj_at_def + idle_thread_ptr_def idle_tcb_def kh0_def s0_ptr_defs s0_internal_def) + +lemma only_idle_s0[simp]: + "only_idle s0_internal" + apply (clarsimp simp: only_idle_def st_tcb_at_tcb_states_of_state_eq + identity_eq[symmetric] tcb_states_of_state_s0) + apply (simp add: s0_ptr_defs s0_internal_def) + done + +lemma if_unsafe_then_cap_s0[simp]: + "if_unsafe_then_cap s0_internal" + apply (clarsimp simp: if_unsafe_then_cap_def ex_cte_cap_wp_to_def) + apply (drule s0_caps_of_state) + apply (case_tac "a=Low_cnode_ptr") + apply (rule_tac x=Low_tcb_ptr in exI, rule_tac x="tcb_cnode_index 0" in exI) + apply (fastforce simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def) + apply (case_tac "a=High_cnode_ptr") + apply (rule_tac x=High_tcb_ptr in exI, rule_tac x="tcb_cnode_index 0" in exI) + apply (fastforce simp: cte_wp_at_cases s0_internal_def kh0_def kh0_obj_def) + apply (case_tac "a=Low_tcb_ptr") + apply (rule_tac x=Low_cnode_ptr in exI, rule_tac x="the_nat_to_bl_10 1" in exI) + apply (fastforce simp: s0_internal_def kh0_def kh0_obj_def Low_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + apply (case_tac "a=High_tcb_ptr") + apply (rule_tac x=High_cnode_ptr in exI, rule_tac x="the_nat_to_bl_10 1" in exI) + apply (fastforce simp: s0_internal_def kh0_def kh0_obj_def High_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + apply (rule_tac x=Silc_cnode_ptr in exI, rule_tac 
x="the_nat_to_bl_10 2" in exI) + apply (fastforce simp: s0_internal_def kh0_def kh0_obj_def Silc_caps_def + cte_wp_at_cases well_formed_cnode_n_def) + done + +lemma valid_reply_caps_s0[simp]: + "valid_reply_caps s0_internal" + apply (clarsimp simp: valid_reply_caps_def) + apply (rule conjI) + apply (force dest: s0_caps_of_state + simp: cte_wp_at_caps_of_state has_reply_cap_def is_reply_cap_to_def) + apply (clarsimp simp: unique_reply_caps_def) + apply (drule s0_caps_of_state)+ + apply (erule disjE | simp add: is_reply_cap_def)+ + done + +lemma valid_reply_masters_s0[simp]: + "valid_reply_masters s0_internal" + apply (clarsimp simp: valid_reply_masters_def) + apply (force dest: s0_caps_of_state simp: cte_wp_at_caps_of_state is_master_reply_cap_to_def) + done + +lemma valid_global_refs_s0[simp]: + "valid_global_refs s0_internal" + apply (clarsimp simp: valid_global_refs_def valid_refs_def cte_wp_at_caps_of_state) + apply (drule s0_caps_of_state) + apply (clarsimp simp: global_refs_def s0_internal_def arch_state0_def) + apply (erule disjE | simp add: cap_range_def + | clarsimp simp: irq_node_offs_distinct[symmetric] + | simp only: s0_ptr_defs, force)+ + done + +lemma valid_arch_state_s0[simp]: + "valid_arch_state s0_internal" + apply (clarsimp simp: valid_arch_state_def s0_internal_def arch_state0_def) + apply (intro conjI) + apply (auto simp: valid_asid_table_def kh0_def kh0_obj_def opt_map_def split: option.splits)[1] + apply (fastforce simp: valid_global_arch_objs_def obj_at_def kh0_def a_type_def + init_global_pt_def max_pt_level_not_asid_pool_level[symmetric]) + apply (clarsimp simp: valid_global_tables_def pt_walk.simps obind_def) + apply (fastforce dest: pt_walk_max_level + simp: obind_def opt_map_def asid_pool_level_eq geq_max_pt_level pte_of_def kh0_def + kh0_obj_def pte_rights_of_def + split: if_splits) + done + +lemma valid_irq_node_s0[simp]: + "valid_irq_node s0_internal" + apply (clarsimp simp: valid_irq_node_def) + apply (rule conjI) + apply (simp add: 
s0_internal_def) + apply (rule injI) + apply simp + apply (rule ccontr) + apply (rule_tac bnd="0x40" and 'a=64 in shift_distinct_helper[rotated 3]) + apply assumption + apply simp + apply simp + apply (rule ucast_less[where 'b=6, simplified]) + apply simp + apply (rule ucast_less[where 'b=6, simplified]) + apply simp + apply (rule notI) + apply (drule ucast_up_inj) + apply simp + apply simp + apply (clarsimp simp: obj_at_def s0_internal_def) + apply (force simp: kh0_def is_cap_table_def well_formed_cnode_n_def dom_empty_cnode) + done + +lemma valid_irq_handlers_s0[simp]: + "valid_irq_handlers s0_internal" + apply (clarsimp simp: valid_irq_handlers_def ran_def) + apply (force dest: s0_caps_of_state) + done + +lemma valid_irq_state_s0[simp]: + "valid_irq_states s0_internal" + by (clarsimp simp: valid_irq_states_def valid_irq_masks_def s0_internal_def machine_state0_def) + +lemma valid_machine_state_s0[simp]: + "valid_machine_state s0_internal" + by (clarsimp simp: valid_machine_state_def s0_internal_def const_def + machine_state0_def in_user_frame_def obj_at_def) + +lemma valid_arch_objs_s0[simp]: + "valid_vspace_objs s0_internal" + apply (clarsimp simp: valid_vspace_objs_def obj_at_def) + apply (drule vs_lookup_s0_SomeD) + apply (auto simp: aobjs_of_Some kh_s0_def kh0_obj_def data_at_def obj_at_def + ptrFromPAddr_addr_from_ppn' vmpage_size_of_level_def max_pt_level_def2 + shared_page_ptr_phys_def) + done + +lemma valid_vs_lookup_s0_internal: + "valid_vs_lookup s0_internal" + supply pt_simps = pt_slot_offset_def pt_bits_left_def pt_index_def max_pt_level_def2 + supply user_region_simps = user_region_def canonical_user_def + supply caps_of_state_simps = caps_of_state_def get_cap_def gets_def get_def get_object_def + assert_def assert_opt_def fail_def return_def bind_def + apply (clarsimp simp: valid_vs_lookup_def vs_lookup_target_def vs_lookup_slot_def split: if_splits) + \ \asid pool level\ + apply (drule vs_lookup_level) + apply (clarsimp simp: 
pool_for_asid_vs_lookup pool_for_asid_s0 asid_pools_of_s0 + vspace_for_pool_def user_region_def vref_for_level_asid_pool + dest!: asid_high_low split: if_splits) + \ \High asid\ + apply (rule conjI, clarsimp simp: High_asid_def asid_low_bits_def) + apply (rule_tac x=High_cnode_ptr in exI) + apply (rule_tac x="(the_nat_to_bl_10 3)" in exI) + apply (fastforce simp: High_cnode_def High_caps_def caps_of_state_def get_cap_def get_object_def + gets_def get_def assert_def assert_opt_def fail_def return_def bind_def + s0_internal_def kh0_def well_formed_cnode_n_def) + \ \Low asid\ + apply (rule conjI, clarsimp simp: Low_asid_def asid_low_bits_def) + apply (rule_tac x=Low_cnode_ptr in exI) + apply (rule_tac x="(the_nat_to_bl_10 3)" in exI) + apply (fastforce simp: Low_cnode_def Low_caps_def caps_of_state_def get_cap_def get_object_def + gets_def get_def assert_def assert_opt_def fail_def return_def bind_def + s0_internal_def kh0_def well_formed_cnode_n_def) + \ \below asid pool level\ + apply (clarsimp simp: vs_lookup_table_def split: if_splits) + apply (clarsimp simp: pt_walk.simps) + apply (case_tac "bot_level < max_pt_level"; clarsimp) + + prefer 2 + \ \bot level = max pt level\ + + apply (clarsimp simp: pool_for_asid_s0 vspace_for_pool_def asid_pools_of_s0 + dest!: asid_high_low split: if_splits) + \ \High asid\ + apply (rule conjI, clarsimp simp: High_asid_def asid_low_bits_def) + apply (rule_tac x=High_cnode_ptr in exI) + apply (rule_tac x="(the_nat_to_bl_10 6)" in exI) + apply (rule_tac x="ArchObjectCap (PageTableCap High_pt_ptr (Some (High_asid,0)))" in exI) + apply (subst (asm) s0_internal_def) + apply (clarsimp simp: in_omonad ptes_of_def High_pd_def ptrFromPAddr_addr_from_ppn' + dest!: kh0_SomeD split: if_splits) + apply (intro conjI) + apply (fastforce simp: High_cnode_def High_caps_def caps_of_state_simps + s0_internal_def kh0_def well_formed_cnode_n_def) + apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + 
apply (word_bitwise, fastforce) + apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) + \ \Low asid\ + apply (rule conjI, clarsimp simp: Low_asid_def asid_low_bits_def) + apply (rule_tac x=Low_cnode_ptr in exI) + apply (rule_tac x="(the_nat_to_bl_10 6)" in exI) + apply (rule_tac x="ArchObjectCap (PageTableCap Low_pt_ptr (Some (Low_asid,0)))" in exI) + apply (subst (asm) s0_internal_def) + apply (clarsimp simp: in_omonad ptes_of_def Low_pd_def ptrFromPAddr_addr_from_ppn' + dest!: kh0_SomeD split: if_splits) + apply (intro conjI) + apply (fastforce simp: Low_cnode_def Low_caps_def caps_of_state_simps + s0_internal_def kh0_def well_formed_cnode_n_def) + apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (word_bitwise, fastforce) + apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) + \ \bot level < max pt level\ + apply (clarsimp simp: pool_for_asid_s0 vspace_for_pool_def asid_pools_of_s0 + dest!: asid_high_low split: if_splits) + \ \High asid\ + apply (subst (asm) ptes_of_def) + apply (clarsimp simp: pts_of_s0) + apply (clarsimp simp: in_omonad kh0_obj_def pptr_from_pte_def ptrFromPAddr_addr_from_ppn' + split: if_splits) + apply (rule conjI, clarsimp simp: High_asid_def asid_low_bits_def) + apply (prop_tac "pt_walk (max_pt_level - 1) bot_level High_pt_ptr vref (ptes_of s0_internal) = + Some (max_pt_level - 1, High_pt_ptr)") + apply (clarsimp simp: pt_walk.simps) + apply (clarsimp simp: ptes_of_def pts_of_s0 in_omonad split: if_splits) + apply (clarsimp simp: ptes_of_def pts_of_s0 shared_page_ptr_phys_def ptrFromPAddr_addr_from_ppn' + split: if_splits) + apply (rule_tac x=High_cnode_ptr in exI) + apply (rule_tac x="the_nat_to_bl_10 5" in exI) + apply (rule exI, intro conjI) + apply (fastforce 
simp: High_cnode_def High_caps_def caps_of_state_simps + s0_internal_def kh0_def well_formed_cnode_n_def) + apply clarsimp + apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (word_bitwise, fastforce) + \ \Low asid\ + prefer 2 + apply (subst (asm) ptes_of_def) + apply (clarsimp simp: pts_of_s0) + apply (clarsimp simp: in_omonad kh0_obj_def pptr_from_pte_def ptrFromPAddr_addr_from_ppn' + split: if_splits) + apply (rule conjI, clarsimp simp: Low_asid_def asid_low_bits_def) + apply (prop_tac "pt_walk (max_pt_level - 1) bot_level Low_pt_ptr vref (ptes_of s0_internal) = + Some (max_pt_level - 1, Low_pt_ptr)") + apply (clarsimp simp: pt_walk.simps) + apply (clarsimp simp: ptes_of_def pts_of_s0 in_omonad split: if_splits) + apply (clarsimp simp: ptes_of_def pts_of_s0 shared_page_ptr_phys_def ptrFromPAddr_addr_from_ppn' + split: if_splits) + apply (rule_tac x=Low_cnode_ptr in exI) + apply (rule_tac x="the_nat_to_bl_10 5" in exI) + apply (rule exI, intro conjI) + apply (fastforce simp: Low_cnode_def Low_caps_def caps_of_state_simps + s0_internal_def kh0_def well_formed_cnode_n_def) + apply clarsimp + apply (clarsimp simp: vref_for_level_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (word_bitwise, fastforce) + \ \No lookups to other ptes\ + apply (clarsimp simp: in_omonad ptes_of_def pts_of_s0 split: if_splits) + apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) + apply (clarsimp simp: in_omonad ptes_of_def pts_of_s0 split: if_splits) + apply (clarsimp simp: kh0_obj_def mask_def pt_simps user_region_simps bit_simps s0_ptr_defs) + apply (rule FalseE, word_bitwise, fastforce simp: elf_index_value) + done + +lemma valid_arch_caps_s0[simp]: + "valid_arch_caps s0_internal" + supply if_split[split del] + supply caps_of_state_simps = caps_of_state_def get_cap_def gets_def get_def 
get_object_def + assert_def assert_opt_def fail_def return_def bind_def + apply (clarsimp simp: valid_arch_caps_def) + apply (intro conjI) + apply (simp add: valid_vs_lookup_s0_internal) + apply (clarsimp simp: valid_asid_pool_caps_def s0_internal_def arch_state0_def) + apply (clarsimp split: if_splits) + apply (rule_tac x="High_cnode_ptr" in exI) + apply (rule_tac x="the_nat_to_bl_10 4" in exI) + apply (force simp: caps_of_state_simps well_formed_cnode_n_def s0_internal_def kh0_obj_def + kh0_def High_caps_def High_asid_def asid_high_bits_of_def asid_low_bits_def + split: if_splits) + apply (rule_tac x="Low_cnode_ptr" in exI) + apply (rule_tac x="the_nat_to_bl_10 4" in exI) + apply (force simp: caps_of_state_simps well_formed_cnode_n_def s0_internal_def kh0_obj_def + kh0_def Low_caps_def Low_asid_def asid_high_bits_of_def asid_low_bits_def + split: if_splits) + apply (clarsimp simp: valid_table_caps_def) + apply (fastforce simp: caps_of_state_simps pts_of_s0 s0_internal_def kh0_obj_def + tcb_cnode_map_def Silc_caps_def High_caps_def Low_caps_def + dest!: kh0_SomeD split: if_splits kernel_object.splits option.splits) + apply (clarsimp simp: unique_table_caps_def) + apply (clarsimp simp: caps_of_state_simps split: if_splits kernel_object.splits option.splits) + apply (subst (asm) s0_internal_def) + apply (clarsimp simp: kh0_def kh0_obj_def Silc_caps_def High_caps_def Low_caps_def + split: if_splits) + apply (subst (asm) s0_internal_def) + apply (clarsimp simp: kh0_def kh0_obj_def Silc_caps_def High_caps_def Low_caps_def + split: if_splits) + apply (clarsimp simp: s0_internal_def kh0_def kh0_obj_def split: if_splits; + clarsimp simp: tcb_cnode_map_def split: if_splits) + apply (clarsimp simp: s0_internal_def kh0_def kh0_obj_def split: if_splits; + clarsimp simp: tcb_cnode_map_def split: if_splits) + apply (clarsimp simp: unique_table_refs_def) + apply (drule s0_caps_of_state)+ + apply clarsimp + apply (elim disjE; clarsimp) + done + +lemma valid_global_objs_s0[simp]: 
+ "valid_global_objs s0_internal" + by (clarsimp simp: valid_global_objs_def s0_internal_def arch_state0_def) + +lemma valid_kernel_mappings_s0[simp]: + "valid_kernel_mappings s0_internal" + by (clarsimp simp: valid_kernel_mappings_def s0_internal_def ran_def + split: kernel_object.splits arch_kernel_obj.splits) + +lemma equal_kernel_mappings_s0[simp]: + "equal_kernel_mappings s0_internal" + supply misc = vref_for_level_def pt_bits_left_def asid_pool_level_size + pageBits_def ptTranslationBits_def mask_def max_pt_level_def2 + apply (clarsimp simp: equal_kernel_mappings_def obj_at_def vspace_for_asid_def + vspace_for_pool_def pool_for_asid_s0 asid_pools_of_s0) + apply (clarsimp simp: obind_def pts_of_s0) + apply (clarsimp simp: has_kernel_mappings_def split: if_splits) + apply (rule conjI; clarsimp) + apply (clarsimp simp: kernel_mapping_slots_def s0_ptr_defs misc) + apply (fastforce simp: pts_of_s0 s0_internal_def arch_state0_def + kh0_obj_def opt_map_def riscv_global_pt_def + dest!: kh0_SomeD split: if_splits option.splits) + apply (clarsimp simp: pts_of_s0) + apply (clarsimp simp: s0_internal_def riscv_global_pt_def arch_state0_def kh0_obj_def + kernel_mapping_slots_def s0_ptr_defs misc elf_index_value)+ + done + +lemma valid_asid_map_s0[simp]: + "valid_asid_map s0_internal" + by (clarsimp simp: valid_asid_map_def s0_internal_def arch_state0_def) + +lemma valid_global_pd_mappings_s0_helper: + "\ pptr_base \ vref; vref < pptr_base + (1 << kernel_window_bits) \ + \ \a b. 
pt_lookup_target 0 arm_global_pt_ptr vref (ptes_of s0_internal) = Some (a, b) \ + is_aligned b (pt_bits_left a) \ + addrFromPPtr b + (vref && mask (pt_bits_left a)) = addrFromPPtr vref" + supply misc = vref_for_level_def pt_bits_left_def asid_pool_level_size + pageBits_def ptTranslationBits_def mask_def max_pt_level_def2 + apply (clarsimp simp: pt_lookup_target_def obind_def split: option.splits) + apply (prop_tac "pt_lookup_slot_from_level max_pt_level 0 arm_global_pt_ptr vref (ptes_of s0_internal) = + Some (max_pt_level, pt_slot_offset max_pt_level arm_global_pt_ptr vref)") + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) + apply (fastforce simp: ptes_of_def in_omonad s0_internal_def kh0_def init_global_pt_def + global_pte_def is_aligned_pt_slot_offset_pte) + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) + apply (rule conjI; clarsimp dest!: pt_walk_max_level simp: max_pt_level_def2 split: if_splits) + apply (rule conjI; clarsimp) + apply (clarsimp simp: ptes_of_def pts_of_s0 global_pte_def kernel_window_bits_def + table_index_offset_pt_bits_left is_aligned_pt_slot_offset_pte + split: if_splits) + apply (clarsimp simp: misc s0_ptr_defs) + apply (word_bitwise, fastforce) + apply (clarsimp simp: misc s0_ptr_defs kernel_mapping_slots_def) + apply (word_bitwise, fastforce) + apply (clarsimp simp: ptes_of_def pts_of_s0 is_aligned_pt_slot_offset_pte global_pte_def + split: if_splits) + apply (clarsimp simp: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def bit_simps + mask_def s0_ptr_defs pt_bits_left_def max_pt_level_def2 + pptrBaseOffset_def paddrBase_def is_aligned_def kernel_window_bits_def) + apply (word_bitwise, fastforce) + apply (clarsimp simp: addr_from_ppn_def ptrFromPAddr_def addrFromPPtr_def bit_simps is_aligned_def + s0_ptr_defs pt_bits_left_def max_pt_level_def2 kernel_mapping_slots_def + mask_def pt_slot_offset_def pt_index_def pptrBaseOffset_def paddrBase_def + toplevel_bits_value elf_index_value 
kernel_window_bits_def) + apply (word_bitwise, fastforce) + done + +lemma ptes_of_elf_window: + "\kernel_elf_base \ vref; vref < kernel_elf_base + 2 ^ pageBits\ + \ ptes_of s0_internal (pt_slot_offset max_pt_level arm_global_pt_ptr vref) + = Some (global_pte elf_index)" + unfolding ptes_of_def pts_of_s0 + apply (clarsimp simp: obind_def elf_window_4k is_aligned_pt_slot_offset_pte) + done + +lemma valid_global_pd_mappings_s0_helper': + "\ kernel_elf_base \ vref; vref < kernel_elf_base + (1 << pageBits) \ + \ \a b. pt_lookup_target 0 arm_global_pt_ptr vref (ptes_of s0_internal) = Some (a, b) \ + is_aligned b (pt_bits_left a) \ + addrFromPPtr b + (vref && mask (pt_bits_left a)) = addrFromKPPtr vref" + supply misc = vref_for_level_def pt_bits_left_def asid_pool_level_size + pageBits_def ptTranslationBits_def mask_def max_pt_level_def2 + apply (clarsimp simp: pt_lookup_target_def obind_def split: option.splits) + apply (prop_tac "pt_lookup_slot_from_level max_pt_level 0 arm_global_pt_ptr vref (ptes_of s0_internal) = + Some (max_pt_level, pt_slot_offset max_pt_level arm_global_pt_ptr vref)") + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) + apply (fastforce simp: ptes_of_def in_omonad s0_internal_def kh0_def init_global_pt_def + global_pte_def is_aligned_pt_slot_offset_pte) + apply (rule conjI; clarsimp) + apply (rule conjI; clarsimp) + apply (clarsimp simp: pt_lookup_slot_from_level_def pt_walk.simps) + apply (rule conjI; clarsimp) + apply (clarsimp simp: ptes_of_elf_window global_pte_def split: if_splits) + apply (clarsimp simp: ptes_of_elf_window global_pte_def elf_index_value) + apply (clarsimp simp: is_aligned_ptrFromPAddr_kernelELFPAddrBase kernelELFPAddrBase_addrFromKPPtr) + done + +lemma valid_global_pd_mappings_s0[simp]: + "valid_global_vspace_mappings s0_internal" + unfolding valid_global_vspace_mappings_def Let_def + apply (intro conjI) + apply (simp add: s0_internal_def arch_state0_def riscv_global_pt_def) + apply (fastforce simp: 
s0_internal_def arch_state0_def in_omonad kernel_window_def + init_vspace_uses_def translate_address_def riscv_global_pt_def + dest!: valid_global_pd_mappings_s0_helper split: if_splits) + apply (fastforce simp: translate_address_def in_omonad s0_internal_def arch_state0_def + riscv_global_pt_def kernel_elf_window_def init_vspace_uses_def + dest!: valid_global_pd_mappings_s0_helper' split: if_splits) + done + +lemma pspace_in_kernel_window_s0[simp]: + "pspace_in_kernel_window s0_internal" + apply (clarsimp simp: pspace_in_kernel_window_def kernel_window_def + init_vspace_uses_def s0_internal_def arch_state0_def) + apply (subgoal_tac "x \ {pptr_base.. obj_refs cap = {}") + apply (fastforce dest: s0_caps_of_state) + apply (rule Int_emptyI, clarsimp) + apply (erule swap, clarsimp) + apply (drule s0_caps_of_state) + apply (clarsimp simp: kernel_window_def init_vspace_uses_def s0_internal_def arch_state0_def) + apply (subgoal_tac "x \ {pptr_base..Haskell state\ + +text \One invariant we need on s0 is that there exists + an associated Haskell state satisfying the invariants. + This does not yet exist.\ + +lemma Sys1_valid_initial_state_noenabled: + assumes extras_s0: "step_restrict s0" + assumes utf_det: "\pl pr pxn tc ms s. det_inv InUserMode tc s \ einvs s \ + context_matches_state pl pr pxn ms s \ ct_running s + \ (\x. utf (cur_thread s) pl pr pxn (tc, ms) = {x})" + assumes utf_non_empty: "\t pl pr pxn tc ms. utf t pl pr pxn (tc, ms) \ {}" + assumes utf_non_interrupt: "\t pl pr pxn tc ms e f g. 
(e,f,g) \ utf t pl pr pxn (tc, ms) + \ e \ Some Interrupt" + assumes det_inv_invariant: "invariant_over_ADT_if det_inv utf" + assumes det_inv_s0: "det_inv KernelExit (cur_context s0_internal) s0_internal" + shows "valid_initial_state_noenabled det_inv utf s0_internal Sys1PAS timer_irq s0_context" + apply (unfold_locales, simp_all only: pasMaySendIrqs_Sys1PAS) + apply (insert det_inv_invariant)[9] + apply (erule(2) invariant_over_ADT_if.det_inv_abs_state) + apply ((erule invariant_over_ADT_if.det_inv_abs_state + invariant_over_ADT_if.check_active_irq_if_Idle_det_inv + invariant_over_ADT_if.check_active_irq_if_User_det_inv + invariant_over_ADT_if.do_user_op_if_det_inv + invariant_over_ADT_if.handle_preemption_if_det_inv + invariant_over_ADT_if.kernel_entry_if_Interrupt_det_inv + invariant_over_ADT_if.kernel_entry_if_det_inv + invariant_over_ADT_if.kernel_exit_if_det_inv + invariant_over_ADT_if.schedule_if_det_inv)+)[8] + apply (rule Sys1_pas_cur_domain) + apply (rule Sys1_pas_wellformed_noninterference) + apply (simp only: einvs_s0) + apply (simp add: Sys1_current_subject_idemp) + apply (simp add: only_timer_irq_inv_s0 silc_inv_s0 Sys1_pas_cur_domain + domain_sep_inv_s0 Sys1_pas_refined Sys1_guarded_pas_domain + idle_equiv_refl) + apply (clarsimp simp: valid_domain_list_2_def s0_internal_def exst0_def) + apply (simp add: det_inv_s0) + apply (simp add: s0_internal_def exst0_def) + apply (simp add: ct_in_state_def st_tcb_at_tcb_states_of_state_eq + identity_eq[symmetric] tcb_states_of_state_s0) + apply (simp add: s0_ptr_defs s0_internal_def) + apply (simp add: s0_internal_def exst0_def) + apply (rule utf_det) + apply (rule utf_non_empty) + apply (rule utf_non_interrupt) + apply (simp add: extras_s0[simplified s0_def]) + done + +text \the extra assumptions in valid_initial_state of being enabled, + and a serial system, follow from ADT_IF_Refine\ +*) + +end + +end diff --git a/proof/infoflow/ADT_IF.thy b/proof/infoflow/ADT_IF.thy index 97cc5cf80b..673148fdb9 100644 --- 
a/proof/infoflow/ADT_IF.thy +++ b/proof/infoflow/ADT_IF.thy @@ -917,18 +917,6 @@ locale ADT_IF_1 = "do_user_op_if uop tc \guarded_pas_domain aag\" and tcb_arch_ref_tcb_context_set[simp]: "tcb_arch_ref (tcb_arch_update (arch_tcb_context_set uc) tcb) = tcb_arch_ref tcb" - and arch_switch_to_idle_thread_pspace_aligned[wp]: - "arch_switch_to_idle_thread \\s :: det_ext state. pspace_aligned s\" - and arch_switch_to_idle_thread_valid_vspace_objs[wp]: - "arch_switch_to_idle_thread \\s :: det_ext state. valid_vspace_objs s\" - and arch_switch_to_idle_thread_valid_arch_state[wp]: - "arch_switch_to_idle_thread \\s :: det_ext state. valid_arch_state s\" - and arch_switch_to_thread_pspace_aligned[wp]: - "arch_switch_to_thread t \\s :: det_ext state. pspace_aligned s\" - and arch_switch_to_thread_valid_vspace_objs[wp]: - "arch_switch_to_thread t \\s :: det_ext state. valid_vspace_objs s\" - and arch_switch_to_thread_valid_arch_state[wp]: - "arch_switch_to_thread t \\s :: det_ext state. valid_arch_state s\" and arch_switch_to_thread_cur_thread[wp]: "\P. arch_switch_to_thread t \\s :: det_state. P (cur_thread s)\" and arch_activate_idle_thread_cur_thread[wp]: @@ -999,12 +987,8 @@ locale ADT_IF_1 = "create_cap type bits untyped dev sl \\s. P (irq_state_of_state s)\" and arch_invoke_irq_control_irq_state_of_state[wp]: "arch_invoke_irq_control ici \\s. P (irq_state_of_state s)\" - and thread_set_pas_refined: - "\ \tcb. \(getF, v)\ran tcb_cap_cases. getF (f tcb) = getF tcb; - \tcb. tcb_state (f tcb) = tcb_state tcb; - \tcb. tcb_bound_notification (f tcb) = tcb_bound_notification tcb; - \tcb. 
tcb_domain (f tcb) = tcb_domain tcb \ - \ thread_set f t \pas_refined aag\" + and thread_set_context_pas_refined: + "thread_set (tcb_arch_update (arch_tcb_context_set ctxt)) t \pas_refined aag\" begin lemma kernel_entry_silc_inv[wp]: @@ -1017,7 +1001,7 @@ lemma kernel_entry_silc_inv[wp]: unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases arch_tcb_update_aux2 wp: hoare_weak_lift_imp handle_event_silc_inv thread_set_silc_inv thread_set_invs_trivial - thread_set_not_state_valid_sched thread_set_pas_refined + thread_set_not_state_valid_sched thread_set_context_pas_refined | wp (once) hoare_vcg_imp_lift | force)+ lemma kernel_entry_pas_refined[wp]: @@ -1028,7 +1012,7 @@ lemma kernel_entry_pas_refined[wp]: \\_. pas_refined aag\" unfolding kernel_entry_if_def by (wpsimp simp: ran_tcb_cap_cases schact_is_rct_def arch_tcb_update_aux2 - wp: hoare_vcg_imp_lift' handle_event_pas_refined thread_set_pas_refined + wp: hoare_vcg_imp_lift' handle_event_pas_refined thread_set_context_pas_refined guarded_pas_domain_lift thread_set_invs_trivial thread_set_not_state_valid_sched)+ lemma kernel_entry_if_domain_sep_inv: @@ -1394,6 +1378,30 @@ definition ADT_A_if :: kernel_handle_preemption_if kernel_schedule_if kernel_exit_A_if \ {(s,s'). step_restrict s'})\" +(* FIXME AARCH64 IF: not true. Assumes non_kernel_irqs are empty, requires a rework to irq_at + +definition irq_at' :: "bool \ nat \ (irq \ bool) \ irq option" where + "irq_at' in_kernel pos masks \ + let i = irq_oracle pos in (if masks i \ in_kernel \ i \ non_kernel_IRQs then None else Some i)" + +*) +context begin interpretation Arch . + +lemma dmo_getActiveIRQ_wp: + "\\s. 
P (irq_at (irq_state (machine_state s) + 1) (irq_masks (machine_state s))) + (s\machine_state := (machine_state s\irq_state := irq_state (machine_state s) + 1\)\)\ + do_machine_op (getActiveIRQ in_kernel) + \P\" + apply (simp add: do_machine_op_def getActiveIRQ_def non_kernel_IRQs_def) + apply (wp modify_wp | wpc)+ + apply clarsimp + apply (erule use_valid) + apply (wp modify_wp) + apply (auto simp: Let_def non_kernel_IRQs_def irq_at_def split: if_splits) + sorry + +end + lemma check_active_irq_if_wp: "\\s. P ((irq_at (irq_state (machine_state s) + 1) (irq_masks (machine_state s))),tc) (s\machine_state := (machine_state s\irq_state := irq_state (machine_state s) + 1\)\)\ @@ -2427,7 +2435,7 @@ lemma preemption_point_irq_state_inv'[wp]: apply (wpsimp wp: OR_choiceE_wp[where P'="irq_state_inv st and K(irq_is_recurring irq st)" and P''="irq_state_inv st and K(irq_is_recurring irq st)"] simp: reset_work_units_def)+ - apply simp + apply simp apply (wpsimp wp: OR_choiceE_wp dmo_getActiveIRQ_wp simp: reset_work_units_def)+ apply (clarsimp simp: irq_state_inv_def) apply (simp add: next_irq_state_Suc[OF _ recurring_next_irq_state_dom]) diff --git a/proof/infoflow/ARM/ArchADT_IF.thy b/proof/infoflow/ARM/ArchADT_IF.thy index c6ff5a396b..1397f8c0cd 100644 --- a/proof/infoflow/ARM/ArchADT_IF.thy +++ b/proof/infoflow/ARM/ArchADT_IF.thy @@ -123,12 +123,6 @@ lemma tcb_arch_ref_tcb_context_set[ADT_IF_assms, simp]: "tcb_arch_ref (tcb_arch_update (arch_tcb_context_set tc) tcb) = tcb_arch_ref tcb" by (simp add: tcb_arch_ref_def) -crunch arch_switch_to_idle_thread, arch_switch_to_thread - for pspace_aligned[ADT_IF_assms, wp]: "\s :: det_state. pspace_aligned s" - and valid_vspace_objs[ADT_IF_assms, wp]: "\s :: det_state. valid_vspace_objs s" - and valid_arch_state[ADT_IF_assms, wp]: "\s :: det_state. valid_arch_state s" - (wp: crunch_wps) - crunch arch_activate_idle_thread, arch_switch_to_thread for cur_thread[ADT_IF_assms, wp]: "\s. 
P (cur_thread s)" diff --git a/proof/infoflow/CNode_IF.thy b/proof/infoflow/CNode_IF.thy index 6f389db0b1..244da6cc7f 100644 --- a/proof/infoflow/CNode_IF.thy +++ b/proof/infoflow/CNode_IF.thy @@ -324,11 +324,8 @@ locale CNode_IF_1 = "\globals_equiv s and valid_global_objs and valid_arch_state\ set_cap cap p \\_. globals_equiv s\" - and dmo_getActiveIRQ_wp: - "\\s. P (irq_at (irq_state (machine_state s) + 1) (irq_masks (machine_state s))) - (s\machine_state := machine_state s\irq_state := irq_state (machine_state s) + 1\\)\ - do_machine_op (getActiveIRQ in_kernel) - \\rv s :: 's state. P rv s\" + and dmo_getActiveIRQ_globals_equiv: + "\globals_equiv st\ do_machine_op (getActiveIRQ in_kernel) \\_. globals_equiv st\" and arch_globals_equiv_irq_state_update[simp]: "arch_globals_equiv ct it kh kh' as as' ms (irq_state_update f ms') = arch_globals_equiv ct it kh kh' as as' ms ms'" @@ -478,12 +475,6 @@ lemma only_timer_irq_inv_determines_irq_masks: apply fastforce+ done -lemma dmo_getActiveIRQ_globals_equiv: - "\globals_equiv st\ do_machine_op (getActiveIRQ in_kernel) \\_. globals_equiv st\" - apply (wp dmo_getActiveIRQ_wp) - apply (auto simp: globals_equiv_def idle_equiv_def) - done - crunch reset_work_units, work_units_limit_reached, update_work_units for only_timer_irq_inv[wp]: "only_timer_irq_inv irq st" (simp: only_timer_irq_inv_def only_timer_irq_def irq_is_recurring_def is_irq_at_def) diff --git a/proof/infoflow/Decode_IF.thy b/proof/infoflow/Decode_IF.thy index 59e600b1f3..3c47069cbf 100644 --- a/proof/infoflow/Decode_IF.thy +++ b/proof/infoflow/Decode_IF.thy @@ -349,7 +349,7 @@ begin lemma decode_invocation_reads_respects_f: "reads_respects_f aag l - (silc_inv aag st and pas_refined aag and valid_cap cap and invs and ct_active + (silc_inv aag st and pas_refined aag and invs and ct_active and cte_wp_at ((=) cap) slot and ex_cte_cap_to slot and (\s. \r\zobj_refs cap. ex_nonz_cap_to r s) and (\s. \r\cte_refs cap (interrupt_irq_node s). 
ex_cte_cap_to r s) diff --git a/proof/infoflow/FinalCaps.thy b/proof/infoflow/FinalCaps.thy index bb64a834dd..11a500db9f 100644 --- a/proof/infoflow/FinalCaps.thy +++ b/proof/infoflow/FinalCaps.thy @@ -335,16 +335,12 @@ locale FinalCaps_1 = "arch_finalise_cap c x \silc_inv aag st\" and prepare_thread_delete_silc_inv[wp]: "prepare_thread_delete p \silc_inv aag st\" - and handle_reserved_irq_silc_inv[wp]: - "handle_reserved_irq irq \silc_inv aag st\" - and arch_mask_irq_signal_silc_inv[wp]: - "arch_mask_irq_signal irq \silc_inv aag st\" and handle_vm_fault_silc_inv[wp]: "handle_vm_fault t vmft \silc_inv aag st\" + and arch_mask_irq_signal_silc_inv[wp]: + "arch_mask_irq_signal irq \silc_inv aag st\" and handle_vm_fault_cur_thread[wp]: "\P. handle_vm_fault t vmft \\s :: det_state. P (cur_thread s)\" - and handle_hypervisor_fault_silc_inv[wp]: - "handle_hypervisor_fault t hvft \silc_inv aag st\" and arch_activate_idle_threadt_silc_inv[wp]: "arch_activate_idle_thread t \silc_inv aag st\" and arch_switch_to_idle_thread_silc_inv[wp]: @@ -2725,6 +2721,12 @@ lemma thread_set_tcb_fault_handler_update_silc_inv[wp]: \\_. silc_inv aag st\" by (rule thread_set_silc_inv; simp add: tcb_cap_cases_def) +lemma thread_set_tcb_arch_update_silc_inv[wp]: + "\silc_inv aag st\ + thread_set (tcb_arch_update blah) t + \\_. silc_inv aag st\" + by (rule thread_set_silc_inv; simp add: tcb_cap_cases_def) + lemma thread_set_tcb_fault_handler_update_invs: "\invs and K (length a = word_bits)\ thread_set (tcb_fault_handler_update (\y. 
a)) word @@ -2872,6 +2874,16 @@ crunch timer_tick, handle_yield for silc_inv[wp]: "silc_inv aag st" (simp: tcb_cap_cases_def) +end + + +locale FinalCaps_3 = FinalCaps_2 + + assumes handle_reserved_irq_silc_inv[wp]: + "handle_reserved_irq irq \silc_inv aag st\" + and handle_hypervisor_fault_silc_inv[wp]: + "handle_hypervisor_fault t hvft \silc_inv aag st\" +begin + lemma handle_interrupt_silc_inv: "handle_interrupt irq \silc_inv aag st\" unfolding handle_interrupt_def by (wpsimp wp: hoare_drop_imps) @@ -2902,6 +2914,7 @@ crunch schedule ignore: set_scheduler_action simp: crunch_simps) +(* FIXME AARCH64 IF: is this used? *) lemma call_kernel_silc_inv: "\silc_inv aag st and einvs and simple_sched_action and pas_refined aag and (\s. ev \ Interrupt \ ct_active s) diff --git a/proof/infoflow/IRQMasks_IF.thy b/proof/infoflow/IRQMasks_IF.thy index 5adb9602c0..c9dbfaf301 100644 --- a/proof/infoflow/IRQMasks_IF.thy +++ b/proof/infoflow/IRQMasks_IF.thy @@ -60,8 +60,6 @@ locale IRQMasks_IF_1 = "send_signal ntfnptr badge \\s. P (irq_masks_of_state s)\" and handle_vm_fault_irq_masks[wp]: "handle_vm_fault t vmft \\s. P (irq_masks_of_state s)\" - and handle_hypervisor_fault_irq_masks[wp]: - "handle_hypervisor_fault t hvft \\s. P (irq_masks_of_state s)\" and handle_interrupt_irq_masks: "\(\s. P (irq_masks_of_state s)) and domain_sep_inv False (st :: 's state) and K (irq \ maxIRQ)\ handle_interrupt irq @@ -78,8 +76,6 @@ locale IRQMasks_IF_1 = \\rv s :: det_state. (\x. rv = Some x \ x \ maxIRQ)\" and activate_thread_irq_masks[wp]: "activate_thread \\s. P (irq_masks_of_state s)\" - and schedule_irq_masks[wp]: - "schedule \\s. P (irq_masks_of_state s)\" and handle_spurious_irq_masks[wp]: "handle_spurious_irq \\s. P (irq_masks_of_state s)\" begin @@ -300,6 +296,10 @@ locale IRQMasks_IF_2 = IRQMasks_IF_1 state_t "\(\s. P (irq_masks_of_state s)) and domain_sep_inv False (st :: 's state) and tcb_inv_wf tinv\ invoke_tcb tinv \\_ s. 
P (irq_masks_of_state s)\" + and handle_hypervisor_fault_irq_masks[wp]: + "handle_hypervisor_fault t hvft \\s. P (irq_masks_of_state s)\" + and schedule_irq_masks[wp]: + "schedule \\s. P (irq_masks_of_state s)\" begin crunch invoke_domain diff --git a/proof/infoflow/Ipc_IF.thy b/proof/infoflow/Ipc_IF.thy index 852a5476c4..00f7dd8ed6 100644 --- a/proof/infoflow/Ipc_IF.thy +++ b/proof/infoflow/Ipc_IF.thy @@ -159,7 +159,7 @@ locale Ipc_IF_1 = and handle_arch_fault_reply_reads_respects: "reads_respects aag l (K (aag_can_read aag thread)) (handle_arch_fault_reply afault thread x y)" and arch_get_sanitise_register_info_reads_respects[wp]: - "reads_respects aag l \ (arch_get_sanitise_register_info t)" + "reads_respects aag l (K (aag_can_read_or_affect aag l t)) (arch_get_sanitise_register_info t)" and arch_get_sanitise_register_info_valid_global_objs[wp]: "arch_get_sanitise_register_info t \\s :: det_state. valid_global_objs s\" and handle_arch_fault_reply_valid_global_objs[wp]: @@ -2049,8 +2049,7 @@ end lemma reply_from_kernel_globals_equiv: - "\globals_equiv s and valid_objs and valid_arch_state and valid_global_refs and pspace_distinct - and pspace_aligned and (\s. thread \ idle_thread s)\ + "\globals_equiv s and valid_arch_state and (\s. thread \ idle_thread s)\ reply_from_kernel thread x \\_. globals_equiv s\" unfolding reply_from_kernel_def @@ -2205,10 +2204,8 @@ lemma handle_reply_reads_respects_g: lemma reply_from_kernel_reads_respects_g: assumes domains_distinct: "pas_domains_distinct aag" shows - "reads_respects_g aag l (valid_global_objs and valid_objs and valid_arch_state - and valid_global_refs and pspace_distinct - and pspace_aligned and (\s. thread \ idle_thread s) - and K (is_subject aag thread)) + "reads_respects_g aag l (valid_arch_state and (\s. 
thread \ idle_thread s) + and K (is_subject aag thread)) (reply_from_kernel thread x)" apply (rule equiv_valid_guard_imp[OF reads_respects_g]) apply (rule reply_from_kernel_reads_respects[OF domains_distinct]) diff --git a/proof/infoflow/Noninterference.thy b/proof/infoflow/Noninterference.thy index c3adc512f2..6df4c65584 100644 --- a/proof/infoflow/Noninterference.thy +++ b/proof/infoflow/Noninterference.thy @@ -570,7 +570,13 @@ lemmas integrity_subjects_device = integrity_subjects_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct1] lemmas integrity_subjects_asids = - integrity_subjects_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2] + integrity_subjects_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct1] + +lemmas integrity_subjects_hyps = + integrity_subjects_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct1] + +lemmas integrity_subjects_fpus = + integrity_subjects_def[THEN meta_eq_to_obj_eq, THEN iffD1, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2, THEN conjunct2] lemma pas_wellformed_pasSubject_update_Control: "\ pas_wellformed (aag\pasSubject := pasObjectAbs aag p\); @@ -683,14 +689,20 @@ locale Noninterference_1 = (do_machine_op (getActiveIRQ in_kernel))" and handle_spurious_irq_reads_respects_scheduler[wp]: "reads_respects_scheduler aag l \ handle_spurious_irq" - (* FIXME IF: precludes ARM_HYP *) - and getActiveIRQ_no_non_kernel_IRQs: - "getActiveIRQ True = getActiveIRQ False" and valid_cur_hyp_triv: "valid_cur_hyp s" and 
arch_tcb_get_registers_equality: "arch_tcb_get_registers (tcb_arch tcb) = arch_tcb_get_registers (tcb_arch tcb') \ tcb_arch (tcb :: tcb) = tcb_arch (tcb' :: tcb)" + and getActiveIRQ_ev2: + "equiv_valid_2 (scheduler_equiv aag) + (scheduler_affects_equiv aag l) (scheduler_affects_equiv aag l) + (\irq irq'. irq = irq' \ irq = None \ irq' \ Some ` non_kernel_IRQs) + (\s. irq_masks_of_state st = irq_masks_of_state s) + (\s. irq_masks_of_state st = irq_masks_of_state s) + (do_machine_op (getActiveIRQ True)) (do_machine_op (getActiveIRQ False))" + and non_kernel_IRQs_le_maxIRQ: + "irq \ non_kernel_IRQs \ irq \ maxIRQ" begin lemma integrity_update_reference_state: @@ -721,7 +733,8 @@ lemma kernel_entry_if_integrity: \ cur_thread s = cur_thread st" in hoare_strengthen_post) apply (wp handle_event_integrity handle_event_cur_thread | simp)+ apply (fastforce intro: integrity_update_reference_state) - apply (wp thread_set_integrity_autarch thread_set_pas_refined guarded_pas_domain_lift + (* FIXME AARCH64 IF: removing valid_cur_hyp_triv propagates valid_cur_hyp through multiple lemmas *) + apply (wp thread_set_integrity_autarch thread_set_context_pas_refined guarded_pas_domain_lift thread_set_invs_trivial thread_set_not_state_valid_sched | simp add: tcb_cap_cases_def schact_is_rct_def arch_tcb_update_aux2 valid_cur_hyp_triv)+ apply (wp thread_set_tcb_context_update_wp)+ @@ -761,7 +774,7 @@ lemma kernel_entry_if_partitionIntegrity: (\ s. 
ev \ Interrupt \ ct_active s)" in silc_dom_equiv_from_silc_inv_valid') apply (wp kernel_entry_silc_inv[where st'=st'], simp add: schact_is_rct_simple) - apply (fastforce simp: pas_refined_pasMayActivate_update pas_refined_pasMayEditReadyQueues_update + apply (fastforce simp: pas_refined_pasMayEditReadyQueues_update globals_equiv_scheduler_refl silc_dom_equiv_def equiv_for_refl domain_fields_equiv_def ct_active_not_idle') apply (fastforce simp: partitionIntegrity_def) @@ -1319,8 +1332,7 @@ lemma partsSubjectAffects_bounds_subjects_affects: partsSubjectAffects_def image_def label_can_affect_partition_def) apply (safe del: iffI notI) apply (fastforce dest: partitionIntegrity_subjectAffects_obj) - apply ((auto dest: partitionIntegrity_subjectAffects_obj - partitionIntegrity_subjectAffects_mem + apply ((auto dest: partitionIntegrity_subjectAffects_mem partitionIntegrity_subjectAffects_device partitionIntegrity_subjectAffects_cdt partitionIntegrity_subjectAffects_cdt_list @@ -1333,7 +1345,7 @@ lemma partsSubjectAffects_bounds_subjects_affects: OF domains_distinct] domains_distinct[THEN pas_domains_distinct_inj] | fastforce simp: partitionIntegrity_def - silc_dom_equiv_def equiv_for_def)+)[11] + silc_dom_equiv_def equiv_for_def)+)[10] apply ((fastforce intro: affects_lrefl simp: partitionIntegrity_def domain_fields_equiv_def dest: domains_distinct[THEN pas_domains_distinct_inj])+)[16] @@ -1342,17 +1354,6 @@ lemma partsSubjectAffects_bounds_subjects_affects: end -lemma cur_thread_not_SilcLabel: - "\ silc_inv aag st s; invs s \ \ pasObjectAbs aag (cur_thread s) \ SilcLabel" - apply (rule notI) - apply (simp add: silc_inv_def) - apply (drule tcb_at_invs) - apply clarify - apply (drule_tac x="cur_thread s" in spec, erule (1) impE) - apply (auto simp: obj_at_def is_tcb_def is_cap_table_def) - apply (case_tac ko, simp_all) - done - lemma ev_add_pre: "equiv_valid_inv I A P f \ equiv_valid_inv I A (P and Q) f" apply (rule equiv_valid_guard_imp) @@ -1362,7 +1363,7 @@ lemma 
ev_add_pre: crunch check_active_irq_if for invs[wp]: "einvs" - (wp: dmo_getActiveIRQ_wp ignore: do_machine_op) + (ignore: do_machine_op) crunch thread_set for schact_is_rct[wp]: "schact_is_rct" @@ -2815,7 +2816,7 @@ lemma kernel_entry_if_reads_respects_f_g: apply (simp add: kernel_entry_if_def) apply (wp handle_event_reads_respects_f_g thread_set_tcb_context_update_reads_respects_f_g thread_set_tcb_context_update_silc_inv only_timer_irq_inv_pres[where P="\" and Q="\"] - thread_set_invs_trivial thread_set_not_state_valid_sched thread_set_pas_refined + thread_set_invs_trivial thread_set_not_state_valid_sched thread_set_context_pas_refined | simp add: tcb_cap_cases_def arch_tcb_update_aux2)+ apply (elim conjE) apply (frule (1) ct_active_cur_thread_not_idle_thread[OF invs_valid_idle]) @@ -3438,6 +3439,22 @@ lemma handle_preemption_agnostic_tc: context Noninterference_1 begin +lemma handle_non_kernel_IRQ_ev2: + "equiv_valid_2 I A A R P (domain_sep_inv False st and K (irq \ non_kernel_IRQs)) + LHS (handle_interrupt irq >>= RHS)" + unfolding handle_interrupt_def + apply (rule EquivValid.gen_asm_ev2_r) + apply (case_tac "maxIRQ < irq") + apply (fastforce dest: non_kernel_IRQs_le_maxIRQ) + apply (clarsimp simp: bind_assoc) + apply (rule_tac Q="\irq _. irq = IRQInactive" in equiv_valid_2_bind_right) + apply (rule gen_asm_ev2_r) + apply (clarsimp simp: fail_ev2_r) + apply (wpsimp simp: get_irq_state_def)+ + apply (fastforce simp: domain_sep_inv_def) + apply simp + done + lemma preemption_interrupt_scheduler_invisible: assumes domains_distinct[wp]: "pas_domains_distinct (aag :: 'a subject_label PAS)" shows "equiv_valid_2 (scheduler_equiv aag) (scheduler_affects_equiv aag l) @@ -3453,21 +3470,24 @@ lemma preemption_interrupt_scheduler_invisible: and (\s. ct_idle s \ uc' = idle_context s) and (\s. 
\ reads_scheduler_cur_domain aag l s)) (handle_preemption_if uc) (kernel_entry_if Interrupt uc')" - apply (simp add: kernel_entry_if_def handle_preemption_if_def maybe_handle_interrupt_def - getActiveIRQ_no_non_kernel_IRQs) + apply (simp add: kernel_entry_if_def handle_preemption_if_def maybe_handle_interrupt_def) apply (rule equiv_valid_2_bind_right) apply (rule equiv_valid_2_bind_right) apply (simp add: liftE_def bind_assoc) apply (simp only: option.case_eq_if) - apply (rule equiv_valid_2_bind_pre[where R'="(=)"]) + apply (rule equiv_valid_2_bind_pre[OF _ getActiveIRQ_ev2]) + apply (elim disjE) apply (simp split del: if_split) apply (rule equiv_valid_2_bind_pre[where R'="(=)" and Q="\\" and Q'="\\"]) apply (rule return_ev2) apply simp apply (rule equiv_valid_2) apply (wp handle_interrupt_reads_respects_scheduler[where st=st and st'=st'] | simp)+ - apply (rule equiv_valid_2) - apply (rule dmo_getActive_IRQ_reads_respect_scheduler) + apply clarsimp + apply (rule equiv_valid_2_guard_imp) + apply (rule handle_non_kernel_IRQ_ev2) + apply simp + apply fastforce apply (wp dmo_getActiveIRQ_return_axiom[simplified try_some_magic] | simp add: imp_conjR arch_tcb_update_aux2 | elim conjE @@ -3530,7 +3550,7 @@ lemma kernel_entry_scheduler_equiv_2: | wp (once) hoare_drop_imps)+ apply (rule context_update_cur_thread_snippit) apply (wp thread_set_invs_trivial guarded_pas_domain_lift - thread_set_pas_refined thread_set_not_state_valid_sched + thread_set_context_pas_refined thread_set_not_state_valid_sched | simp add: tcb_cap_cases_def arch_tcb_update_aux2)+ apply (fastforce simp: silc_inv_not_cur_thread cur_thread_idle)+ done diff --git a/proof/infoflow/RISCV64/ArchADT_IF.thy b/proof/infoflow/RISCV64/ArchADT_IF.thy index d68d1d16f8..10a7c093a9 100644 --- a/proof/infoflow/RISCV64/ArchADT_IF.thy +++ b/proof/infoflow/RISCV64/ArchADT_IF.thy @@ -81,12 +81,6 @@ lemma tcb_arch_ref_tcb_context_set[ADT_IF_assms, simp]: "tcb_arch_ref (tcb_arch_update (arch_tcb_context_set tc) tcb) = 
tcb_arch_ref tcb" by (simp add: tcb_arch_ref_def) -crunch arch_switch_to_idle_thread, arch_switch_to_thread - for pspace_aligned[ADT_IF_assms, wp]: "\s :: det_state. pspace_aligned s" - and valid_vspace_objs[ADT_IF_assms, wp]: "\s :: det_state. valid_vspace_objs s" - and valid_arch_state[ADT_IF_assms, wp]: "\s :: det_state. valid_arch_state s" - (wp: crunch_wps simp: crunch_simps) - crunch arch_activate_idle_thread, arch_switch_to_thread for cur_thread[ADT_IF_assms, wp]: "\s. P (cur_thread s)" diff --git a/proof/infoflow/RISCV64/ArchUserOp_IF.thy b/proof/infoflow/RISCV64/ArchUserOp_IF.thy index 7e0168d38f..137a0ef333 100644 --- a/proof/infoflow/RISCV64/ArchUserOp_IF.thy +++ b/proof/infoflow/RISCV64/ArchUserOp_IF.thy @@ -566,9 +566,20 @@ proof - apply (frule_tac level=level in valid_vspace_objs_pte) apply clarsimp apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def) - apply (fastforce simp: table_base_pt_slot_offset[OF vs_lookup_table_is_aligned] - dest: valid_arch_state_asid_table dest!: pt_lookup_vs_lookupI - intro: vs_lookup_level) + apply (drule pt_lookup_vs_lookupI) + apply (fastforce simp: table_base_pt_slot_offset[OF vs_lookup_table_is_aligned]) + apply (clarsimp simp: table_base_pt_slot_offset[OF vs_lookup_table_is_aligned]) + apply (rule_tac x=asid in exI) + apply (rule_tac x=x in exI) + apply (subst table_base_pt_slot_offset[OF vs_lookup_table_is_aligned]) + apply fastforce + apply fastforce + apply fastforce + apply fastforce + apply (fastforce dest: valid_arch_state_asid_table) + apply fastforce + apply clarsimp + apply (erule vs_lookup_level) apply (erule disjE[OF _ _ FalseE]) prefer 2 apply (clarsimp simp: pt_lookup_slot_def pt_lookup_slot_from_level_def in_omonad pt_walk.simps) diff --git a/proof/infoflow/Tcb_IF.thy b/proof/infoflow/Tcb_IF.thy index 8ebed3048a..a7cbdc81cc 100644 --- a/proof/infoflow/Tcb_IF.thy +++ b/proof/infoflow/Tcb_IF.thy @@ -191,7 +191,7 @@ locale Tcb_IF_1 = and arch_post_modify_registers_reads_respects_f[wp]: 
"reads_respects_f aag l \ (arch_post_modify_registers cur t)" and arch_get_sanitise_register_info_reads_respects_f[wp]: - "reads_respects_f aag l \ (arch_get_sanitise_register_info t)" + "reads_respects_f aag l (K (aag_can_read_or_affect aag l t)) (arch_get_sanitise_register_info t)" begin crunch cap_swap_for_delete @@ -295,9 +295,12 @@ locale Tcb_IF_2 = Tcb_IF_1 + and K (authorised_tcb_inv aag ti \ authorised_tcb_inv_extra aag ti)) (invoke_tcb ti)" and arch_post_set_flags_globals_equiv[wp]: - "arch_post_set_flags t flags \globals_equiv st\" + "\globals_equiv st and invs\ + arch_post_set_flags t flags + \\_. globals_equiv st\" and arch_post_set_flags_reads_respects_f: - "reads_respects_f aag l \ (arch_post_set_flags t flags)" + "pas_domains_distinct aag \ + reads_respects_f aag l (silc_inv aag st) (arch_post_set_flags t flags)" begin crunch suspend, restart @@ -306,6 +309,8 @@ crunch suspend, restart crunch set_flags for globals_equiv[wp]: "globals_equiv st" + and valid_arch_state[wp]: "valid_arch_state" + (simp: ran_tcb_cap_cases) lemma invoke_tcb_globals_equiv: "\invs and globals_equiv st and tcb_inv_wf ti\ @@ -327,8 +332,7 @@ lemma invoke_tcb_globals_equiv: apply (simp del: invoke_tcb.simps tcb_inv_wf.simps) apply (wp invoke_tcb_thread_preservation cap_delete_globals_equiv cap_insert_globals_equiv'' thread_set_globals_equiv set_mcpriority_globals_equiv - set_priority_globals_equiv - | fastforce)+ + set_priority_globals_equiv | fastforce)+ done end @@ -481,6 +485,10 @@ lemma thread_set_tcb_flags_update_silc_inv[wp]: "thread_set (tcb_flags_update f) t \silc_inv aag st\" by (rule thread_set_silc_inv; simp add: tcb_cap_cases_def) +crunch set_flags + for silc_inv[wp]: "silc_inv aag st" + (ignore: thread_set) + lemma set_flags_reads_respects_f: assumes "pas_domains_distinct aag" shows "reads_respects_f aag l (silc_inv aag st) (set_flags t flags)" diff --git a/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine.thy b/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine.thy new 
file mode 100644 index 0000000000..3fd515bd2c --- /dev/null +++ b/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine.thy @@ -0,0 +1,398 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchADT_IF_Refine +imports ADT_IF_Refine +begin + +context Arch begin global_naming RISCV64 + +named_theorems ADT_IF_Refine_assms + +defs arch_extras_def: + "arch_extras \ \s. True" + +declare arch_extras_def[simp] + +lemma kernelEntry_invs'[ADT_IF_Refine_assms, wp]: + "\invs' and (\s. e \ Interrupt \ ct_running' s) + and (\s. ksSchedulerAction s = ResumeCurrentThread) + and arch_extras\ + kernelEntry_if e tc + \\_. invs'\" + apply (simp add: kernelEntry_if_def) + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp + | wp (once) hoare_drop_imps + | clarsimp)+ + done + +lemma kernelEntry_arch_extras[ADT_IF_Refine_assms, wp]: + "\invs' and (\s. e \ Interrupt \ ct_running' s) + and (\s. ksSchedulerAction s = ResumeCurrentThread) + and arch_extras\ + kernelEntry_if e tc + \\_. 
arch_extras\" + apply (simp add: kernelEntry_if_def) + apply (wp threadSet_invs_trivial threadSet_ct_running' hoare_weak_lift_imp + | wp (once) hoare_drop_imps + | clarsimp)+ + done + +crunch threadSet + for arch_extras[ADT_IF_Refine_assms, wp]: "arch_extras" + +lemma arch_tcb_context_set_tcb_relation[ADT_IF_Refine_assms]: + "tcb_relation tcb tcb' + \ tcb_relation (tcb\tcb_arch := arch_tcb_context_set tc (tcb_arch tcb)\) + (tcbArch_update (atcbContextSet tc) tcb')" + by (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_set_def atcbContextSet_def) + +lemma arch_tcb_context_get_atcbContextGet[ADT_IF_Refine_assms]: + "tcb_relation tcb tcb' + \ (arch_tcb_context_get \ tcb_arch) tcb = (atcbContextGet \ tcbArch) tcb'" + by (simp add: tcb_relation_def arch_tcb_relation_def arch_tcb_context_get_def atcbContextGet_def) + +definition + "ptable_attrs_s' s \ ptable_attrs (ksCurThread s) (absKState s)" + +definition + "ptable_xn_s' s \ \addr. Execute \ ptable_attrs_s' s addr" + +definition doUserOp_if :: + "user_transition_if \ user_context \ (event option \ user_context) kernel" where + "doUserOp_if uop tc \ + do pr \ gets ptable_rights_s'; + pxn \ gets (\s x. pr x \ {} \ ptable_xn_s' s x); + pl \ gets (\s. ptable_lift_s' s |` {x. pr x \ {}}); + allow_read \ return {y. \x. pl x = Some y \ AllowRead \ pr x}; + allow_write \ return {y. \x. pl x = Some y \ AllowWrite \ pr x}; + t \ getCurThread; + um \ gets (\s. (user_mem' s \ ptrFromPAddr)); + dm \ gets (\s. 
(device_mem' s \ ptrFromPAddr)); + ds \ gets (device_state \ ksMachineState); + assert (dom (um \ addrFromPPtr) \ - dom ds); + assert (dom (dm \ addrFromPPtr) \ dom ds); + u \ + return + (uop t pl pr pxn + (tc, um |` allow_read, + (ds \ ptrFromPAddr) |` allow_read)); + assert (u \ {}); + (e, tc', um',ds') \ select u; + doMachineOp + (user_memory_update + ((um' |` allow_write \ addrFromPPtr) |` (- (dom ds)))); + doMachineOp + (device_memory_update + ((ds' |` allow_write \ addrFromPPtr) |` dom ds)); + return (e, tc') + od" + +lemma ptable_attrs_abs_state[simp]: + "ptable_attrs thread (abs_state s) = ptable_attrs thread s" + by (simp add: ptable_attrs_def abs_state_def) + +lemma doUserOp_if_empty_fail[ADT_IF_Refine_assms]: + "empty_fail (doUserOp_if uop tc)" + apply (simp add: doUserOp_if_def) + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (wp (once)) + apply wp + apply (subst bind_assoc[symmetric]) + apply (rule empty_fail_bind) + apply (rule empty_fail_select_bind) + apply (wp | wpc)+ + done + +lemma do_user_op_if_corres[ADT_IF_Refine_assms]: + "corres (=) (einvs and ct_running and (\_. \t pl pr pxn tcu. f t pl pr pxn tcu \ {})) + (invs' and (\s. 
ksSchedulerAction s = ResumeCurrentThread) and ct_running') + (do_user_op_if f tc) (doUserOp_if f tc)" + apply (rule corres_gen_asm) + apply (simp add: do_user_op_if_def doUserOp_if_def) + apply (rule corres_gets_same) + apply (clarsimp simp: ptable_rights_s_def ptable_rights_s'_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: ptable_attrs_s'_def ptable_attrs_s_def ptable_xn_s'_def ptable_xn_s_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: absArchState_correct curthread_relation ptable_lift_s'_def + ptable_lift_s_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (simp add: getCurThread_def) + apply (rule corres_gets_same) + apply (simp add: curthread_relation) + apply simp + apply (rule corres_gets_same[where R ="\r s. dom (r \ addrFromPPtr) \ - device_region s"]) + apply (clarsimp simp add: user_mem_relation dest!: invs_valid_stateI invs_valid_stateI') + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def) + apply fastforce + apply (rule corres_gets_same[where R ="\r s. dom (r \ addrFromPPtr) \ device_region s"]) + apply (clarsimp simp add: device_mem_relation dest!: invs_valid_stateI invs_valid_stateI') + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def) + apply fastforce + apply (rule corres_gets_same[where R ="\r s. 
dom r = device_region s"]) + apply (clarsimp simp: state_relation_def) + apply simp + apply (rule corres_assert_imp_r) + apply fastforce + apply (rule corres_assert_imp_r) + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="(=)"]) + apply (clarsimp simp: select_def corres_underlying_def) + apply clarsimp + apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) + apply (rule corres_underlying_trivial) + apply (clarsimp simp: user_memory_update_def) + apply (rule no_fail_modify) + apply (rule corres_split[OF corres_machine_op,where r'="(=)"]) + apply (rule corres_underlying_trivial) + apply wp + apply (rule corres_trivial, clarsimp) + apply (wp hoare_TrueI[where P = \] | simp)+ + done + +lemma doUserOp_if_invs'[ADT_IF_Refine_assms, wp]: + "\invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running' and ex_abs (einvs)\ + doUserOp_if f tc + \\_. invs'\" + apply (simp add: doUserOp_if_def split_def ex_abs_def) + apply (wp device_update_invs' dmo_invs' | simp)+ + apply (clarsimp simp add: no_irq_modify user_memory_update_def) + apply wpsimp + apply wp+ + apply (clarsimp simp: user_memory_update_def simpler_modify_def + restrict_map_def + split: option.splits) + apply (auto dest: ptable_rights_imp_UserData[rotated 2] + simp: ptable_rights_s'_def ptable_lift_s'_def) + done + +lemma doUserOp_valid_duplicates[ADT_IF_Refine_assms, wp]: + "doUserOp_if f tc \arch_extras\" + apply (simp add: doUserOp_if_def split_def) + apply (wp dmo_invs' | simp)+ + done + +lemma doUserOp_if_schedact[ADT_IF_Refine_assms, wp]: + "doUserOp_if f tc \\s. P (ksSchedulerAction s)\" + apply (simp add: doUserOp_if_def) + apply (wp | wpc | simp)+ + done + +lemma doUserOp_if_st_tcb_at[ADT_IF_Refine_assms, wp]: + "doUserOp_if f tc \st_tcb_at' st t\" + apply (simp add: doUserOp_if_def) + apply (wp | wpc | simp)+ + done + +lemma doUserOp_if_cur_thread[ADT_IF_Refine_assms, wp]: + "doUserOp_if f tc \\s. 
P (ksCurThread s)\" + apply (simp add: doUserOp_if_def) + apply (wp | wpc | simp)+ + done + +lemma do_user_op_if_corres'[ADT_IF_Refine_assms]: + "corres_underlying state_relation nf False (=) (einvs and ct_running) + (invs' and (\s. ksSchedulerAction s = ResumeCurrentThread) and ct_running') + (do_user_op_if f tc) (doUserOp_if f tc)" + apply (simp add: do_user_op_if_def doUserOp_if_def) + apply (rule corres_gets_same) + apply (clarsimp simp: ptable_rights_s_def ptable_rights_s'_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: ptable_attrs_s'_def ptable_attrs_s_def ptable_xn_s'_def ptable_xn_s_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: absArchState_correct curthread_relation ptable_lift_s'_def + ptable_lift_s_def) + apply (subst absKState_correct, fastforce, assumption+) + apply (clarsimp elim!: state_relationE) + apply simp + apply (simp add: getCurThread_def) + apply (rule corres_gets_same) + apply (simp add: curthread_relation) + apply simp + apply (rule corres_gets_same[where R ="\r s. dom (r \ addrFromPPtr) \ - device_region s"]) + apply (clarsimp simp add: user_mem_relation dest!: invs_valid_stateI invs_valid_stateI') + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def) + apply fastforce + apply (rule corres_gets_same[where R ="\r s. dom (r \ addrFromPPtr) \ device_region s"]) + apply (clarsimp simp add: device_mem_relation dest!: invs_valid_stateI invs_valid_stateI') + apply (clarsimp simp: invs_def valid_state_def pspace_respects_device_region_def dom_def) + apply (rule corres_gets_same[where R ="\r s. 
dom r = device_region s"]) + apply (clarsimp simp: state_relation_def) + apply simp + apply (rule corres_assert_imp_r) + apply fastforce + apply (rule corres_assert_imp_r) + apply fastforce + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="dc"]) + apply (rule corres_assert') + apply simp + apply (rule corres_split[where r'="(=)"]) + apply (clarsimp simp: select_def corres_underlying_def) + apply clarsimp + apply (rule corres_split[OF corres_machine_op',where r'="(=)"]) + apply (rule corres_underlying_trivial, clarsimp) + apply (rule corres_split[OF corres_machine_op', where r'="(=)"]) + apply (rule corres_underlying_trivial, clarsimp) + apply (rule corres_trivial, clarsimp) + apply (wp hoare_TrueI[where P = \] | simp)+ + apply force + apply force + done + +lemma getActiveIRQ_nf: + "no_fail (\_. True) (getActiveIRQ in_kernel)" + apply (simp add: getActiveIRQ_def) + apply (rule no_fail_pre) + apply (rule no_fail_gets no_fail_modify + no_fail_return | rule no_fail_bind | simp + | intro impI conjI)+ + apply (wp del: no_irq | simp)+ + done + +lemma dmo_getActiveIRQ_corres[ADT_IF_Refine_assms]: + "corres (=) \ \ (do_machine_op (getActiveIRQ in_kernel)) (doMachineOp (getActiveIRQ in_kernel'))" + apply (rule SubMonad_R.corres_machine_op) + apply (rule corres_Id) + apply (simp add: getActiveIRQ_def non_kernel_IRQs_def) + apply simp + apply (rule getActiveIRQ_nf) + done + +lemma dmo'_getActiveIRQ_wp[ADT_IF_Refine_assms]: + "\\s. P (irq_at (irq_state (ksMachineState s) + 1) (irq_masks (ksMachineState s))) + (s\ksMachineState := (ksMachineState s\irq_state := irq_state (ksMachineState s) + 1\)\)\ + doMachineOp (getActiveIRQ in_kernel) + \P\" + apply(simp add: doMachineOp_def getActiveIRQ_def non_kernel_IRQs_def) + apply(wp modify_wp | wpc)+ + apply clarsimp + apply(erule use_valid) + apply (wp modify_wp) + apply(auto simp: irq_at_def) + done + +lemma scheduler_if'_arch_extras[ADT_IF_Refine_assms, wp]: + "\invs' and arch_extras\ + schedule'_if tc + \\_. 
arch_extras\" + apply (simp add: schedule'_if_def) + apply (wp hoare_drop_imps | simp)+ + done + +lemma handlePreemption_if_arch_extras[ADT_IF_Refine_assms, wp]: + "handlePreemption_if tc \arch_extras\" + apply (simp add: handlePreemption_if_def) + apply (wp dmo'_getActiveIRQ_wp hoare_drop_imps) + done + +crunch doUserOp_if + for ksDomainTime_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomainTime s)" + and ksDomSchedule_inv[ADT_IF_Refine_assms, wp]: "\s. P (ksDomSchedule s)" + +crunch checkActiveIRQ_if + for arch_extras[ADT_IF_Refine_assms, wp]: arch_extras + +lemma valid_device_abs_state_eq[ADT_IF_Refine_assms]: + "valid_machine_state s \ abs_state s = s" + apply (simp add: abs_state_def observable_memory_def) + apply (case_tac s) + apply clarsimp + apply (case_tac machine_state) + apply clarsimp + apply (rule ext) + apply (fastforce simp: user_mem_def option_to_0_def valid_machine_state_def) + done + +lemma doUserOp_if_no_interrupt[ADT_IF_Refine_assms]: + "\K (uop_sane uop)\ + doUserOp_if uop tc + \\r s. (fst r) \ Some Interrupt\" + apply (simp add: doUserOp_if_def del: split_paired_All) + apply (wp | wpc)+ + apply (clarsimp simp: uop_sane_def simp del: split_paired_All) + done + +lemma handleEvent_corres_arch_extras[ADT_IF_Refine_assms]: + "corres (dc \ dc) + (einvs and (\s. event \ Interrupt \ ct_running s) and schact_is_rct) + (invs' and (\s. event \ Interrupt \ ct_running' s) + and (\s. 
ksSchedulerAction s = ResumeCurrentThread) + and arch_extras) + (handle_event event) (handleEvent event)" + by (fastforce intro: corres_guard2_imp[OF handleEvent_corres]) + +lemma getActiveIRQ_corres_True_False: + "corres_underlying Id False True (=) \ \ (getActiveIRQ True) (getActiveIRQ False)" + unfolding getActiveIRQ_def + by (corres simp: non_kernel_IRQs_def) + +lemma maybeHandleInterrupt_corres_True_False[ADT_IF_Refine_assms]: + "corres dc einvs invs' (maybe_handle_interrupt True) (maybeHandleInterrupt False)" + unfolding maybe_handle_interrupt_def maybeHandleInterrupt_def + apply (corres corres: corres_machine_op getActiveIRQ_corres_True_False + handleInterrupt_corres[@lift_corres_args] + simp: irq_state_independent_def + | corres_cases_both)+ + apply (wpsimp wp: hoare_drop_imps) + apply clarsimp + apply (strengthen contract_all_imp_strg[where P'=True, simplified]) + apply (wpsimp wp: doMachineOp_getActiveIRQ_IRQ_active' hoare_vcg_all_lift) + apply clarsimp + apply (clarsimp simp: invs'_def valid_state'_def) + done + +end + +requalify_consts + RISCV64.doUserOp_if + + +global_interpretation ADT_IF_Refine_1?: ADT_IF_Refine_1 doUserOp_if +proof goal_cases + interpret Arch . + case 1 show ?case + by (unfold_locales; (fact ADT_IF_Refine_assms)?) +qed + + +sublocale valid_initial_state_noenabled \ valid_initial_state_noenabled?: + ADT_valid_initial_state_noenabled doUserOp_if .. + +sublocale valid_initial_state_noenabled \ valid_initial_state .. 
+ +end diff --git a/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine_C.thy b/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine_C.thy new file mode 100644 index 0000000000..d45cb057a3 --- /dev/null +++ b/proof/infoflow/refine/AARCH64/ArchADT_IF_Refine_C.thy @@ -0,0 +1,237 @@ +(* + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory ArchADT_IF_Refine_C +imports ADT_IF_Refine_C +begin + +context kernel_m begin + +named_theorems ADT_IF_Refine_assms + +lemma handleInterrupt_ccorres[ADT_IF_Refine_assms]: + "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs') + (UNIV) + [] + (handleEvent Interrupt) + (handleInterruptEntry_C_body_if)" + apply (rule ccorres_guard_imp2) + apply (simp add: handleEvent_def minus_one_norm handleInterruptEntry_C_body_if_def) + apply (rule ccorres_add_return2) + apply (ctac (no_vcg) add: checkInterrupt_ccorres) + apply (rule_tac R="\_. rv = Inr ()" in ccorres_return[where R'=UNIV]) + apply (rule conseqPre, vcg) + apply (clarsimp simp: return_def) + apply (simp add: liftE_def) + apply wpsimp + apply clarsimp + done + +lemma handleInvocation_ccorres'[ADT_IF_Refine_assms]: + "ccorres (K dc \ dc) (liftxf errstate id (K ()) ret__unsigned_long_') + (invs' and arch_extras and ct_active' and sch_act_simple) + (UNIV \ {s. isCall_' s = from_bool isCall} + \ {s. isBlocking_' s = from_bool isBlocking}) [] + (handleInvocation isCall isBlocking) (Call handleInvocation_'proc)" + apply (simp only: arch_extras_def pred_top_right_neutral) + apply (rule handleInvocation_ccorres) + done + +definition + "ptable_rights_s'' s \ ptable_rights (cur_thread (cstate_to_A s)) (cstate_to_A s)" + +definition + "ptable_lift_s'' s \ ptable_lift (cur_thread (cstate_to_A s)) (cstate_to_A s)" + +definition + "ptable_attrs_s'' s \ ptable_attrs (cur_thread (cstate_to_A s)) (cstate_to_A s)" + +definition + "ptable_xn_s'' s \ \addr. 
Execute \ ptable_attrs_s'' s addr" + +definition + doMachineOp_C :: "(machine_state, 'a) nondet_monad \ (cstate, 'a) nondet_monad" +where + "doMachineOp_C mop \ + do + ms \ gets (\s. phantom_machine_state_' (globals s)); + (r, ms') \ select_f (mop ms); + modify (\s. s \globals := globals s \ phantom_machine_state_' := ms' \\); + return r + od" + +definition doUserOp_C_if + :: "user_transition_if \ user_context \ (cstate, (event option \ user_context)) nondet_monad" + where + "doUserOp_C_if uop tc \ + do + pr \ gets ptable_rights_s''; + pxn \ gets (\s x. pr x \ {} \ ptable_xn_s'' s x); + pl \ gets (\s. restrict_map (ptable_lift_s'' s) {x. pr x \ {}}); + allow_read \ return {y. \x. pl x = Some y \ AllowRead \ pr x}; + allow_write \ return {y. \x. pl x = Some y \ AllowWrite \ pr x}; + t \ gets (\s. cur_thread (cstate_to_A s)); + um \ gets (\s. user_mem_C (globals s) \ ptrFromPAddr); + dm \ gets (\s. device_mem_C (globals s) \ ptrFromPAddr); + ds \ gets (\s. device_state (phantom_machine_state_' (globals s))); + assert (dom (um \ addrFromPPtr) \ - dom ds); + assert (dom (dm \ addrFromPPtr) \ dom ds); + u \ return (uop t pl pr pxn (tc, um |` allow_read, (ds \ ptrFromPAddr)|` allow_read)); + assert (u \ {}); + (e,(tc',um',ds')) \ select u; + setUserMem_C ((um' |` allow_write \ addrFromPPtr) |` (- dom ds)); + setDeviceState_C ((ds' |` allow_write \ addrFromPPtr) |` dom ds); + return (e,tc') + od" + +lemma corres_underlying_split4: + "(\a b c d. corres_underlying srel nf nf' rrel (Q a b c d) (Q' a b c d) (f a b c d) (f' a b c d)) + \ corres_underlying srel nf nf' rrel (case x of (a,b,c,d) \ Q a b c d) + (case x of (a,b,c,d) \ Q' a b c d) + (case x of (a,b,c,d) \ f a b c d) + (case x of (a,b,c,d) \ f' a b c d)" + by (cases x; simp) + +lemma do_user_op_if_C_corres[ADT_IF_Refine_assms]: + "corres_underlying rf_sr False False (=) + (invs' and ex_abs einvs and (\_. 
uop_nonempty f)) \ + (doUserOp_if f tc) (doUserOp_C_if f tc)" + apply (rule corres_gen_asm) + apply (simp add: doUserOp_if_def doUserOp_C_if_def uop_nonempty_def del: split_paired_All) + apply (rule corres_gets_same) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: absKState_crelation ptable_rights_s'_def ptable_rights_s''_def + rf_sr_def cstate_relation_def Let_def cstate_to_H_correct) + apply simp + apply (rule corres_gets_same) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt + simp: ptable_xn_s'_def ptable_xn_s''_def ptable_attrs_s_def + absKState_crelation ptable_attrs_s'_def ptable_attrs_s''_def rf_sr_def) + apply simp + apply (rule corres_gets_same) + apply clarsimp + apply (frule ex_abs_ksReadyQueues_asrt) + apply (clarsimp simp: absKState_crelation curthread_relation ptable_lift_s'_def ptable_lift_s''_def + ptable_lift_s_def rf_sr_def) + apply simp + apply (simp add: getCurThread_def) + apply (rule corres_gets_same) + apply (fastforce dest: ex_abs_ksReadyQueues_asrt simp: absKState_crelation rf_sr_def) + apply simp + apply (rule corres_gets_same) + apply (rule fun_cong[where x=ptrFromPAddr]) + apply (rule_tac f=comp in arg_cong) + apply (rule user_mem_C_relation[symmetric]) + apply (simp add: rf_sr_def cstate_relation_def Let_def cpspace_relation_def) + apply fastforce + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: rf_sr_def cstate_relation_def Let_def + cpspace_relation_def) + apply (drule device_mem_C_relation[symmetric]) + apply fastforce + apply (simp add: comp_def) + apply simp + apply (rule corres_gets_same) + apply (clarsimp simp: cstate_relation_def rf_sr_def + Let_def cmachine_state_relation_def) + apply simp + apply (rule corres_guard_imp) + apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp add: corres_underlying_def fail_def + assert_def return_def + split: if_splits) + apply simp + apply (rule_tac P=\ and P'=\ and r'="(=)" in corres_split) + apply (clarsimp simp add: 
corres_underlying_def fail_def + assert_def return_def + split: if_splits) + apply simp + apply (rule_tac r'="(=)" in corres_split[OF corres_select]) + apply clarsimp + apply simp + apply (rule corres_underlying_split4) + apply (rule corres_split[OF user_memory_update_corres_C]) + apply (rule corres_split[OF device_update_corres_C]) + apply (wp | simp)+ + apply (clarsimp simp: ex_abs_def restrict_map_def invs_pspace_aligned' + invs_pspace_distinct' ptable_lift_s'_def ptable_rights_s'_def + split: if_splits) + apply (drule ptable_rights_imp_UserData[rotated -1]) + apply ((fastforce | intro conjI)+)[4] + apply (clarsimp simp: user_mem'_def device_mem'_def dom_def split: if_splits) + apply fastforce + apply (clarsimp simp add: invs'_def valid_state'_def valid_pspace'_def ex_abs_def) + done + +lemma check_active_irq_corres_C[ADT_IF_Refine_assms]: + "corres_underlying rf_sr False False (=) \ \ + (checkActiveIRQ_if tc) (checkActiveIRQ_C_if tc)" + apply (simp add: checkActiveIRQ_if_def checkActiveIRQ_C_if_def) + apply (simp add: getActiveIRQ_C_def) + apply (subst bind_assoc[symmetric]) + apply (rule corres_guard_imp) + apply (rule corres_split[where r'="\a c. 
case a of None \ c = ucast irqInvalid + | Some x \ c = ucast x \ c \ ucast irqInvalid", + OF ccorres_corres_u_xf]) + apply (rule ccorres_guard_imp) + apply (rule ccorres_rel_imp, rule ccorres_guard_imp) + apply (rule getActiveIRQ_ccorres) + apply simp+ + apply (rule no_fail_dmo') + apply (rule no_fail_getActiveIRQ) + apply (rule corres_trivial, clarsimp split: if_split option.splits) + apply wp+ + apply simp+ + apply fastforce + done + +lemma obs_cpspace_user_data_relation[ADT_IF_Refine_assms]: + "\pspace_aligned' bd;pspace_distinct' bd; + cpspace_user_data_relation (ksPSpace bd) (underlying_memory (ksMachineState bd)) hgs\ + \ cpspace_user_data_relation (ksPSpace bd) (underlying_memory (observable_memory (ksMachineState bd) (user_mem' bd))) hgs" + apply (clarsimp simp: cmap_relation_def dom_heap_to_user_data) + apply (drule bspec,fastforce) + apply (clarsimp simp: cuser_user_data_relation_def observable_memory_def + heap_to_user_data_def map_comp_def Let_def + split: option.split_asm) + apply (drule_tac x = off in spec) + apply (subst option_to_0_user_mem') + apply (subst map_option_byte_to_word_heap) + apply (clarsimp simp: projectKO_opt_user_data pointerInUserData_def field_simps + split: kernel_object.split_asm option.split_asm) + apply (frule(1) pspace_alignedD') + apply (subst neg_mask_add_aligned) + apply (simp add: objBits_simps) + apply (simp add: word_less_nat_alt) + apply (rule le_less_trans[OF unat_plus_gt]) + apply (subst add.commute) + apply (subst unat_mult_simple) + apply (simp add: word_bits_def) + apply (rule less_le_trans[OF unat_lt2p]) + apply simp + apply simp + apply (rule nat_add_offset_less [where n = 3, simplified]) + apply simp + apply (rule unat_lt2p) + apply (simp add: pageBits_def objBits_simps) + apply (frule(1) pspace_distinctD') + apply (clarsimp simp: obj_at'_def typ_at'_def ko_wp_at'_def objBits_simps) + apply simp + done + +end + + +sublocale kernel_m \ ADT_IF_Refine_1?: ADT_IF_Refine_1 _ _ _ doUserOp_C_if +proof goal_cases + 
interpret Arch . + case 1 show ?case + by (unfold_locales; (fact ADT_IF_Refine_assms)?) +qed + +end diff --git a/proof/infoflow/refine/AARCH64/Example_Valid_StateH.thy b/proof/infoflow/refine/AARCH64/Example_Valid_StateH.thy new file mode 100644 index 0000000000..a7df9032df --- /dev/null +++ b/proof/infoflow/refine/AARCH64/Example_Valid_StateH.thy @@ -0,0 +1,3835 @@ +(* + * Copyright 2023, Proofcraft Pty Ltd + * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230) + * + * SPDX-License-Identifier: GPL-2.0-only + *) + +theory Example_Valid_StateH +imports "InfoFlow.Example_Valid_State" ArchADT_IF_Refine +begin + +context begin interpretation Arch . + +section \Haskell state\ + +text \One invariant we need on s0 is that there exists + an associated Haskell state satisfying the invariants. + This does not yet exist.\ + +subsection \Defining the State\ + +definition empty_cte :: "nat \ bool list \ (capability \ mdbnode) option" where + "empty_cte bits \ \x. if length x = bits then Some (NullCap, MDB 0 0 False False) else None" + +abbreviation (input) Null_mdb :: "mdbnode" where + "Null_mdb \ MDB 0 0 False False" + + +text \Low's CSpace\ + +definition Low_capsH :: "cnode_index \ (capability \ mdbnode) option" where + "Low_capsH \ + (empty_cte 10) + ((the_nat_to_bl_10 1) + \ (ThreadCap Low_tcb_ptr, Null_mdb), + (the_nat_to_bl_10 2) + \ (CNodeCap Low_cnode_ptr 10 2 10, MDB 0 Low_tcb_ptr False False), + (the_nat_to_bl_10 3) + \ (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0))), + MDB 0 (Low_tcb_ptr + 0x20) False False), + (the_nat_to_bl_10 4) + \ (ArchObjectCap (ASIDPoolCap Low_pool_ptr (ucast Low_asid)), Null_mdb), + (the_nat_to_bl_10 5) + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage + False (Some (ucast Low_asid, 0))), + MDB 0 (Silc_cnode_ptr + 0xA0) False False), + (the_nat_to_bl_10 6) + \ (ArchObjectCap (PageTableCap Low_pt_ptr (Some (ucast Low_asid, 0))), Null_mdb), + (the_nat_to_bl_10 318) + \ (NotificationCap ntfn_ptr 0 
True False, MDB (Silc_cnode_ptr + 318 * 0x20) 0 False False))" + +definition Low_cte' :: "10 word \ cte option" where + "Low_cte' \ (map_option (\(cap, mdb). CTE cap mdb)) \ Low_capsH \ to_bl" + +definition Low_cte :: "obj_ref \ obj_ref \ kernel_object option" where + "Low_cte \ \base offs. + if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then map_option (\cte. KOCTE cte) (Low_cte' (ucast (offs - base >> 5))) + else None" + + +text \High's Cspace\ + +definition High_capsH :: "cnode_index \ (capability \ mdbnode) option" where + "High_capsH \ + (empty_cte 10) + ((the_nat_to_bl_10 1) + \ (ThreadCap High_tcb_ptr, Null_mdb), + (the_nat_to_bl_10 2) + \ (CNodeCap High_cnode_ptr 10 2 10, MDB 0 High_tcb_ptr False False), + (the_nat_to_bl_10 3) + \ (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0))), + MDB 0 (High_tcb_ptr + 0x20) False False), + (the_nat_to_bl_10 4) + \ (ArchObjectCap (ASIDPoolCap High_pool_ptr (ucast High_asid)), Null_mdb), + (the_nat_to_bl_10 5) + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage + False (Some (ucast High_asid, 0))), + MDB (Silc_cnode_ptr + 0xA0) 0 False False), + (the_nat_to_bl_10 6) + \ (ArchObjectCap (PageTableCap High_pt_ptr (Some (ucast High_asid, 0))), + Null_mdb), + (the_nat_to_bl_10 318) + \ (NotificationCap ntfn_ptr 0 False True, MDB 0 (Silc_cnode_ptr + 318 * 0x20) False False))" + +definition High_cte' :: "10 word \ cte option" where + "High_cte' \ (map_option (\(cap, mdb). CTE cap mdb)) \ High_capsH \ to_bl" + +definition High_cte :: "obj_ref \ obj_ref \ kernel_object option" where + "High_cte \ \base offs. + if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then map_option (\cte. 
KOCTE cte) (High_cte' (ucast (offs - base >> 5))) + else None" + + +text \We need a copy of boundary crossing caps owned by SilcLabel.\ + +definition Silc_capsH :: "cnode_index \ (capability \ mdbnode) option" where + "Silc_capsH \ + (empty_cte 10) + ((the_nat_to_bl_10 2) + \ (CNodeCap Silc_cnode_ptr 10 2 10, Null_mdb), + (the_nat_to_bl_10 5) + \ (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage + False (Some (ucast Silc_asid, 0))), + MDB (Low_cnode_ptr + 0xA0) (High_cnode_ptr + 0xA0) False False), + (the_nat_to_bl_10 318) + \ (NotificationCap ntfn_ptr 0 True False, + MDB (High_cnode_ptr + 318 * 0x20) (Low_cnode_ptr + 318 * 0x20) False False))" + +definition Silc_cte' :: "10 word \ cte option" where + "Silc_cte' \ (map_option (\(cap, mdb). CTE cap mdb)) \ Silc_capsH \ to_bl" + +definition Silc_cte :: "obj_ref \ obj_ref \ kernel_object option" where + "Silc_cte \ \base offs. + if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then map_option (\cte. KOCTE cte) (Silc_cte' (ucast (offs - base >> 5))) + else None" + + +text \Notification between Low and High\ + +definition ntfnH :: notification where + "ntfnH \ NTFN (ntfn.WaitingNtfn [High_tcb_ptr]) None" + + +text \Global page table\ + +definition global_pteH' :: "pt_index \ pte" where + "global_pteH' idx \ + if idx = 0x100 + then PagePTE ((ucast (idx && mask (ptTranslationBits - 1)) << ptTranslationBits * size max_pt_level)) + False False False VMKernelOnly + else if idx = elf_index + then PagePTE (ucast ((kernelELFPAddrBase && ~~mask toplevel_bits) >> pageBits)) False False False VMKernelOnly + else InvalidPTE" + +definition global_pteH where + "global_pteH \ (\idx. if idx \ kernel_mapping_slots then global_pteH' idx else InvalidPTE)" + +definition global_ptH :: "obj_ref \ obj_ref \ kernel_object option" where + "global_ptH \ \base. + (map_option (\x. KOArch (KOPTE (global_pteH (x :: pt_index))))) \ + (\offs. 
if is_aligned offs 3 \ base \ offs \ offs \ base + 2 ^ 12 - 1 + then Some (ucast (offs - base >> 3)) else None)" + + +text \Low's page tables\ + +definition Low_pt'H :: "pt_index \ pte" where + "Low_pt'H \ + (\_. InvalidPTE) + (0 := PagePTE (shared_page_ptr_phys >> pt_bits) False False False VMReadWrite)" + +definition Low_ptH :: "obj_ref \ obj_ref \ kernel_object option" where + "Low_ptH \ + \base. (map_option (\x. KOArch (KOPTE (Low_pt'H x)))) \ + (\offs. if is_aligned offs 3 \ base \ offs \ offs \ base + 2 ^ 12 - 1 + then Some (ucast (offs - base >> 3)) else None)" + +definition Low_pd'H :: "pt_index \ pte" where + "Low_pd'H \ + global_pteH + (0 := PageTablePTE (addrFromPPtr Low_pt_ptr >> pt_bits) False)" + +definition Low_pdH :: "obj_ref \ obj_ref \ kernel_object option" where + "Low_pdH \ + \base. (map_option (\x. KOArch (KOPTE (Low_pd'H x)))) \ + (\offs. if is_aligned offs 3 \ base \ offs \ offs \ base + 2 ^ 12 - 1 + then Some (ucast (offs - base >> 3)) else None)" + + +text \High's page tables\ + +definition High_pt'H :: "pt_index \ pte" where + "High_pt'H \ + (\_. InvalidPTE) + (0 := PagePTE (shared_page_ptr_phys >> pt_bits) False False False VMReadOnly)" + +definition High_ptH :: "obj_ref \ obj_ref \ kernel_object option" where + "High_ptH \ + \base. (map_option (\x. KOArch (KOPTE (High_pt'H x)))) \ + (\offs. if is_aligned offs 3 \ base \ offs \ offs \ base + 2 ^ 12 - 1 + then Some (ucast (offs - base >> 3)) else None)" + +definition High_pd'H :: "pt_index \ pte" where + "High_pd'H \ + global_pteH + (0 := PageTablePTE (addrFromPPtr High_pt_ptr >> pt_bits) False)" + +definition High_pdH :: "obj_ref \ obj_ref \ kernel_object option" where + "High_pdH \ + \base. (map_option (\x. KOArch (KOPTE (High_pd'H x)))) \ + (\offs. 
if is_aligned offs 3 \ base \ offs \ offs \ base + 2 ^ 12 - 1 + then Some (ucast (offs - base >> 3)) else None)" + + +text \Low's tcb\ + +definition Low_tcbH :: tcb where + "Low_tcbH \ Thread + \ \tcbCTable =\ (CTE (CNodeCap Low_cnode_ptr 10 2 10) + (MDB (Low_cnode_ptr + 0x40) 0 False False)) + \ \tcbVTable =\ (CTE (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0)))) + (MDB (Low_cnode_ptr + 0x60) 0 False False)) + \ \tcbReply =\ (CTE (ReplyCap Low_tcb_ptr True True) (MDB 0 0 True True)) + \ \tcbCaller =\ (CTE NullCap Null_mdb) + \ \tcbIPCBufferFrame =\ (CTE NullCap Null_mdb) + \ \tcbDomain =\ Low_domain + \ \tcbState =\ Running + \ \tcbMCPriority =\ Low_mcp + \ \tcbPriority =\ Low_prio + \ \tcbQueued =\ False + \ \tcbFault =\ None + \ \tcbTimeSlice =\ Low_time_slice + \ \tcbFaultHandler =\ 0 + \ \tcbIPCBuffer =\ 0 + \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None + \ \tcbFlags =\ 0 + \ \tcbContext =\ (ArchThread undefined)" + + +text \High's tcb\ + +definition High_tcbH :: tcb where + "High_tcbH \ Thread + \ \tcbCTable =\ (CTE (CNodeCap High_cnode_ptr 10 2 10) + (MDB (High_cnode_ptr + 0x40) 0 False False)) + \ \tcbVTable =\ (CTE (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0)))) + (MDB (High_cnode_ptr + 0x60) 0 False False)) + \ \tcbReply =\ (CTE (ReplyCap High_tcb_ptr True True) (MDB 0 0 True True)) + \ \tcbCaller =\ (CTE NullCap Null_mdb) + \ \tcbIPCBufferFrame =\ (CTE NullCap Null_mdb) + \ \tcbDomain =\ High_domain + \ \tcbState =\ (BlockedOnNotification ntfn_ptr) + \ \tcbMCPriority =\ High_mcp + \ \tcbPriority =\ High_prio + \ \tcbQueued =\ False + \ \tcbFault =\ None + \ \tcbTimeSlice =\ High_time_slice + \ \tcbFaultHandler =\ 0 + \ \tcbIPCBuffer =\ 0 + \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None + \ \tcbFlags =\ 0 + \ \tcbContext =\ (ArchThread undefined)" + + +text \idle's tcb\ + +definition idle_tcbH :: tcb where + "idle_tcbH \ Thread + \ 
\tcbCTable =\ (CTE NullCap Null_mdb) + \ \tcbVTable =\ (CTE NullCap Null_mdb) + \ \tcbReply =\ (CTE NullCap Null_mdb) + \ \tcbCaller =\ (CTE NullCap Null_mdb) + \ \tcbIPCBufferFrame =\ (CTE NullCap Null_mdb) + \ \tcbDomain =\ default_domain + \ \tcbState =\ IdleThreadState + \ \tcbMCPriority =\ default_priority + \ \tcbPriority =\ default_priority + \ \tcbQueued =\ False + \ \tcbFault =\ None + \ \tcbTimeSlice =\ timeSlice + \ \tcbFaultHandler =\ 0 + \ \tcbIPCBuffer =\ 0 + \ \tcbBoundNotification =\ None + \ \tcbSchedPrev =\ None + \ \tcbSchedNext =\ None + \ \tcbFlags =\ 0 + \ \tcbContext =\ (ArchThread empty_context)" + + +text \Low's asid pool\ + +abbreviation Low_poolH' :: "obj_ref \ obj_ref" where + "Low_poolH' \ \idx. if idx = ucast (asid_low_bits_of Low_asid) then Some Low_pd_ptr else None" + +definition Low_poolH :: arch_kernel_object where + "Low_poolH \ KOASIDPool (ASIDPool Low_poolH')" + + +text \High's asid pool\ + +abbreviation High_poolH' :: "obj_ref \ obj_ref" where + "High_poolH' \ \idx. if idx = ucast (asid_low_bits_of High_asid) then Some High_pd_ptr else None" + +definition High_poolH :: arch_kernel_object where + "High_poolH \ KOASIDPool (ASIDPool High_poolH')" + + +text \Shared page\ + +definition shared_pageH :: "obj_ref \ obj_ref \ kernel_object option" where + "shared_pageH \ \base. + (\offs. if is_aligned offs 12 \ base \ offs \ offs \ base + 2 ^ 21 - 1 + then Some KOUserData else None)" + + +text \Initial ksPSpace\ + +definition irq_cte :: cte where + "irq_cte \ CTE NullCap Null_mdb" + +definition option_update_range :: "('a \ 'b option) \ ('a \ 'b option) \ ('a \ 'b option)" where + "option_update_range f g \ \x. case f x of None \ g x | Some y \ Some y" + +definition kh0H :: "(obj_ref \ kernel_object)" where + "kh0H \ (option_update_range (\x. if \irq :: irq. 
init_irq_node_ptr + (ucast irq << 5) = x + then Some (KOCTE (CTE NullCap Null_mdb)) else None) \ + option_update_range (Low_cte Low_cnode_ptr) \ + option_update_range (High_cte High_cnode_ptr) \ + option_update_range (Silc_cte Silc_cnode_ptr) \ + option_update_range [ntfn_ptr \ KONotification ntfnH] \ + option_update_range [irq_cnode_ptr \ KOCTE irq_cte] \ + option_update_range (Low_pdH Low_pd_ptr) \ + option_update_range (High_pdH High_pd_ptr) \ + option_update_range (Low_ptH Low_pt_ptr) \ + option_update_range (High_ptH High_pt_ptr) \ + option_update_range [Low_pool_ptr \ KOArch Low_poolH] \ + option_update_range [High_pool_ptr \ KOArch High_poolH] \ + option_update_range [Low_tcb_ptr \ KOTCB Low_tcbH] \ + option_update_range [High_tcb_ptr \ KOTCB High_tcbH] \ + option_update_range [idle_tcb_ptr \ KOTCB idle_tcbH] \ + option_update_range (shared_pageH shared_page_ptr_virt) \ + option_update_range (global_ptH riscv_global_pt_ptr) + ) Map.empty" + + +lemma s0_ptrs_aligned: + "is_aligned riscv_global_pt_ptr 12" + "is_aligned High_pd_ptr 12" + "is_aligned Low_pd_ptr 12" + "is_aligned High_pt_ptr 12" + "is_aligned Low_pt_ptr 12" + "is_aligned Silc_cnode_ptr 15" + "is_aligned High_cnode_ptr 15" + "is_aligned Low_cnode_ptr 15" + "is_aligned High_tcb_ptr 10" + "is_aligned Low_tcb_ptr 10" + "is_aligned idle_tcb_ptr 10" + "is_aligned ntfn_ptr 5" + "is_aligned shared_page_ptr_virt 21" + "is_aligned irq_cnode_ptr 10" + "is_aligned Low_pool_ptr 12" + "is_aligned High_pool_ptr 12" + by (simp add: is_aligned_def s0_ptr_defs)+ + + +text \Page offset lemmas\ + +lemma page_offs_min': + "is_aligned ptr 21 \ (ptr :: obj_ref) \ ptr + (ucast (x :: pt_index) << 12)" + apply (erule is_aligned_no_wrap') + apply (word_bitwise, auto) + done + +lemma page_offs_min: + "shared_page_ptr_virt \ shared_page_ptr_virt + (ucast (x:: pt_index) << 12)" + by (simp_all add: page_offs_min' s0_ptrs_aligned) + +lemma page_offs_max': + "is_aligned ptr 21 \ (ptr :: obj_ref) + (ucast (x :: pt_index) << 12) \ 
ptr + 0x1FFFFF" + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult.commute) + apply (rule div_to_mult_word_lt) + apply simp + apply (rule plus_one_helper) + apply simp + apply (cut_tac ucast_less[where x=x]) + apply simp + apply (fastforce elim: dual_order.strict_trans[rotated]) + apply (drule is_aligned_no_overflow) + apply (simp add: add.commute) + done + +lemma page_offs_max: + "shared_page_ptr_virt + (ucast (x :: pt_index) << 12) \ shared_page_ptr_virt + 0x1FFFFF" + by (simp_all add: page_offs_max' s0_ptrs_aligned) + +definition page_offs_range where + "page_offs_range (ptr :: obj_ref) \ {x. ptr \ x \ x \ ptr + 2 ^ 21 - 1} + \ {x. is_aligned x 12}" + +lemma page_offs_in_range': + "is_aligned ptr 21 \ ptr + (ucast (x :: pt_index) << 12) \ page_offs_range ptr" + apply (clarsimp simp: page_offs_min' page_offs_max' page_offs_range_def add.commute) + apply (rule is_aligned_add[OF _ is_aligned_shift]) + apply (erule is_aligned_weaken) + apply simp + done + +lemma page_offs_in_range: + "shared_page_ptr_virt + (ucast (x :: pt_index) << 12) \ page_offs_range shared_page_ptr_virt" + by (simp_all add: page_offs_in_range' s0_ptrs_aligned) + +lemma page_offs_range_correct': + "\ x \ page_offs_range ptr; is_aligned ptr 21 \ + \ \y. 
x = ptr + (ucast (y :: pt_index) << 12)" + apply (clarsimp simp: page_offs_range_def s0_ptr_defs) + apply (rule_tac x="ucast ((x - ptr) >> 12)" in exI) + apply (clarsimp simp: ucast_ucast_mask) + apply (subst aligned_shiftr_mask_shiftl) + apply (rule aligned_sub_aligned) + apply assumption + apply (erule is_aligned_weaken) + apply simp + apply simp + apply simp + apply (rule_tac n=21 in mask_eqI) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_def) + apply (simp add: mask_twice) + apply (subst diff_conv_add_uminus) + apply (subst add.commute[symmetric]) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_minus) + apply simp + apply (subst diff_conv_add_uminus) + apply (subst add_mask_lower_bits) + apply (simp add: is_aligned_def) + apply clarsimp + apply (cut_tac x=x and y="ptr + 0x1FFFFF" and n=21 in neg_mask_mono_le) + apply (simp add: add.commute) + apply (drule_tac n=21 in aligned_le_sharp) + apply (simp add: is_aligned_def) + apply (simp add: add.commute) + apply (subst(asm) mask_out_add_aligned[symmetric]) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: mask_def) + done + +lemma page_offs_range_correct: + "x \ page_offs_range shared_page_ptr_virt + \ \y. 
x = shared_page_ptr_virt + (ucast (y :: pt_index) << 12)" + by (simp_all add: page_offs_range_correct' s0_ptrs_aligned) + + +text \Page table offset lemmas\ + +lemma pt_offs_min': + "is_aligned ptr 12 \ (ptr :: obj_ref) \ ptr + (ucast (x :: pt_index) << 3)" + apply (erule is_aligned_no_wrap') + apply (word_bitwise, auto) + done + +lemma pt_offs_min: + "Low_pd_ptr \ Low_pd_ptr + (ucast (x :: pt_index) << 3)" + "High_pd_ptr \ High_pd_ptr + (ucast (x :: pt_index) << 3)" + "Low_pt_ptr \ Low_pt_ptr + (ucast (x :: pt_index) << 3)" + "High_pt_ptr \ High_pt_ptr + (ucast (x :: pt_index) << 3)" + "riscv_global_pt_ptr \ riscv_global_pt_ptr + (ucast (x :: pt_index) << 3)" + by (simp_all add: pt_offs_min' s0_ptrs_aligned) + +lemma pt_offs_max': + "is_aligned ptr 12 \ (ptr :: obj_ref) + (ucast (x :: pt_index) << 3) \ ptr + 0xFFF" + apply (rule word_plus_mono_right) + apply (simp add: shiftl_t2n mult.commute) + apply (rule div_to_mult_word_lt) + apply simp + apply (rule plus_one_helper) + apply simp + apply (cut_tac ucast_less[where x=x]) + apply simp + apply (fastforce elim: dual_order.strict_trans[rotated]) + apply (drule is_aligned_no_overflow) + apply (simp add: add.commute) + done + +lemma pt_offs_max: + "Low_pd_ptr + (ucast (x :: pt_index) << 3) \ Low_pd_ptr + 0xFFF" + "High_pd_ptr + (ucast (x :: pt_index) << 3) \ High_pd_ptr + 0xFFF" + "Low_pt_ptr + (ucast (x :: pt_index) << 3) \ Low_pt_ptr + 0xFFF" + "High_pt_ptr + (ucast (x :: pt_index) << 3) \ High_pt_ptr + 0xFFF" + "riscv_global_pt_ptr + (ucast (x :: pt_index) << 3) \ riscv_global_pt_ptr + 0xFFF" + by (simp_all add: pt_offs_max' s0_ptrs_aligned) + +definition pt_offs_range where + "pt_offs_range (ptr :: obj_ref) \ {x. ptr \ x \ x \ ptr + 2 ^ 12 - 1} + \ {x. 
is_aligned x 3}" + +lemma pt_offs_in_range': + "is_aligned ptr 12 + \ ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range ptr" + apply (clarsimp simp: pt_offs_min' pt_offs_max' pt_offs_range_def add.commute) + apply (rule is_aligned_add[OF _ is_aligned_shift]) + apply (erule is_aligned_weaken) + apply simp + done + +lemma pt_offs_in_range: + "Low_pd_ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range Low_pd_ptr" + "High_pd_ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range High_pd_ptr" + "Low_pt_ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range Low_pt_ptr" + "High_pt_ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range High_pt_ptr" + "riscv_global_pt_ptr + (ucast (x :: pt_index) << 3) \ pt_offs_range riscv_global_pt_ptr" + by (simp_all add: pt_offs_in_range' s0_ptrs_aligned) + +lemma pt_offs_range_correct': + "\ x \ pt_offs_range ptr; is_aligned ptr 12 \ + \ \y. x = ptr + (ucast (y :: pt_index) << 3)" + apply (clarsimp simp: pt_offs_range_def s0_ptr_defs) + apply (rule_tac x="ucast ((x - ptr) >> 3)" in exI) + apply (clarsimp simp: ucast_ucast_mask) + apply (subst aligned_shiftr_mask_shiftl) + apply (rule aligned_sub_aligned) + apply assumption + apply (erule is_aligned_weaken) + apply simp + apply simp + apply simp + apply (rule_tac n=12 in mask_eqI) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_def) + apply (simp add: mask_twice) + apply (subst diff_conv_add_uminus) + apply (subst add.commute[symmetric]) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_minus) + apply simp + apply (subst diff_conv_add_uminus) + apply (subst add_mask_lower_bits) + apply (simp add: is_aligned_def) + apply clarsimp + apply (cut_tac x=x and y="ptr + 0xFFF" and n=12 in neg_mask_mono_le) + apply (simp add: add.commute) + apply (drule_tac n=12 in aligned_le_sharp) + apply (simp add: is_aligned_def) + apply (simp add: add.commute) + apply (subst(asm) mask_out_add_aligned[symmetric]) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: mask_def) + 
done + +lemma pt_offs_range_correct: + "x \ pt_offs_range Low_pd_ptr \ \y. x = Low_pd_ptr + (ucast (y :: pt_index) << 3)" + "x \ pt_offs_range High_pd_ptr \ \y. x = High_pd_ptr + (ucast (y :: pt_index) << 3)" + "x \ pt_offs_range Low_pt_ptr \ \y. x = Low_pt_ptr + (ucast (y :: pt_index) << 3)" + "x \ pt_offs_range High_pt_ptr \ \y. x = High_pt_ptr + (ucast (y :: pt_index) << 3)" + "x \ pt_offs_range riscv_global_pt_ptr \ \y. x = riscv_global_pt_ptr + (ucast (y :: pt_index) << 3)" + by (simp_all add: pt_offs_range_correct' s0_ptrs_aligned) + + +text \CNode offset lemmas\ + +lemma bl_to_bin_le2p_aux: + "bl_to_bin_aux bs w \ (w + 1) * (2 ^ length bs) - 1" + apply (induct bs arbitrary: w) + apply clarsimp + apply (clarsimp split del: split_of_bool) + apply (drule meta_spec, erule xtr8 [rotated], simp)+ + done + +lemma bl_to_bin_le2p: + "bl_to_bin bs \ (2 ^ length bs) - 1" + apply (unfold bl_to_bin_def) + apply (rule xtr3) + prefer 2 + apply (rule bl_to_bin_le2p_aux) + apply simp + done + +lemma of_bl_length_le: + "\ length x = k; k < len_of TYPE('a) \ \ (of_bl x :: 'a :: len word) \ 2 ^ k - 1" + apply (unfold of_bl_def word_less_alt word_numeral_alt) + apply safe + apply (simp add: word_le_def take_bit_int_def uint_2p_alt uint_word_arith_bintrs(2)) + apply (subst mod_pos_pos_trivial) + apply simp + using not_le apply fastforce + apply (subst uint_word_of_int) + apply (subst mod_pos_pos_trivial) + apply (rule bl_to_bin_ge0) + apply (rule order_less_trans) + apply (rule bl_to_bin_lt2p) + apply simp + apply (rule bl_to_bin_le2p) + done + +lemma cnode_offs_min': + "\ is_aligned ptr 15; length x = 10 \ \ (ptr :: obj_ref) \ ptr + of_bl x * 0x20" + apply (erule is_aligned_no_wrap') + apply (rule div_lt_mult) + apply (drule of_bl_length_less[where 'a=64]) + apply simp + apply simp + apply simp + done + +lemma cnode_offs_min: + "length x = 10 \ Low_cnode_ptr \ Low_cnode_ptr + of_bl x * 0x20" + "length x = 10 \ High_cnode_ptr \ High_cnode_ptr + of_bl x * 0x20" + "length x = 10 \ 
Silc_cnode_ptr \ Silc_cnode_ptr + of_bl x * 0x20" + by (simp_all add: cnode_offs_min' s0_ptrs_aligned) + +lemma cnode_offs_max': + "\ is_aligned ptr 15; length x = 10 \ \ (ptr :: obj_ref) + of_bl x * 0x20 \ ptr + 0x7fff" + apply (rule word_plus_mono_right) + apply (rule div_to_mult_word_lt) + apply simp + apply (rule plus_one_helper) + apply simp + apply (drule of_bl_length_less[where 'a=64]) + apply simp + apply simp + apply (drule is_aligned_no_overflow) + apply (simp add: add.commute) + done + +lemma cnode_offs_max: + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ Low_cnode_ptr + 0x7fff" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ High_cnode_ptr + 0x7fff" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ Silc_cnode_ptr + 0x7fff" + by (simp_all add: cnode_offs_max' s0_ptrs_aligned) + +definition cnode_offs_range where + "cnode_offs_range (ptr :: obj_ref) \ {x. ptr \ x \ x \ ptr + 2 ^ 15 - 1} + \ {x. is_aligned x 5}" + +lemma cnode_offs_in_range': + "\ is_aligned ptr 15; length x = 10 \ \ ptr + of_bl x * 0x20 \ cnode_offs_range ptr" + apply (simp add: cnode_offs_min' cnode_offs_max' cnode_offs_range_def add.commute) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (rule_tac is_aligned_mult_triv2[where x="of_bl x" and n=5, simplified]) + done + +lemma cnode_offs_in_range: + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ cnode_offs_range Low_cnode_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ cnode_offs_range High_cnode_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ cnode_offs_range Silc_cnode_ptr" + by (simp_all add: cnode_offs_in_range' s0_ptrs_aligned) + +lemma le_mask_eq: + "x \ 2 ^ n - 1 \ x AND mask n = (x :: 'a :: len word)" + apply (unfold word_less_alt word_numeral_alt) + apply (simp add: word_of_int_power_hom mask_eq_exp_minus_1[symmetric]) + apply (erule word_le_mask_eq) + done + +lemma word_div_mult': + fixes c :: obj_ref + shows "\ 0 < c; a \ b * c \ \ a div c \ b" + apply 
(simp add: word_le_nat_alt unat_div) + apply (simp add: less_Suc_eq_le[symmetric]) + apply (subst td_gal_lt [symmetric]) + apply (simp add: word_less_nat_alt) + apply (erule order_less_le_trans) + apply (subst unat_word_ariths) + apply (rule_tac y="Suc (unat b * unat c)" in order_trans) + apply simp + apply (simp add: word_less_nat_alt) + done + +lemma cnode_offs_range_correct': + "\ x \ cnode_offs_range ptr; is_aligned ptr 15 \ + \ \y. length y = 10 \ (x = ptr + of_bl y * 0x20)" + apply (clarsimp simp: cnode_offs_range_def s0_ptr_defs) + apply (rule_tac x="to_bl (ucast ((x - ptr) div 0x20) :: 10 word)" in exI) + apply (clarsimp simp: to_bl_ucast of_bl_drop) + apply (subst le_mask_eq) + apply simp + apply (rule word_div_mult') + apply simp + apply simp + apply (rule word_diff_ls') + apply (drule_tac a=x and n=5 in aligned_le_sharp) + apply simp + apply (simp add: add.commute) + apply (subst(asm) mask_out_add_aligned[symmetric]) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: mask_def) + apply simp + apply (clarsimp simp: neg_mask_is_div[where n=5, simplified, symmetric]) + apply (subst is_aligned_neg_mask_eq) + apply (rule aligned_sub_aligned) + apply assumption + apply (erule is_aligned_weaken) + apply simp + apply simp + apply simp + done + +lemma cnode_offs_range_correct: + "x \ cnode_offs_range Low_cnode_ptr \ \y. length y = 10 \ (x = Low_cnode_ptr + of_bl y * 0x20)" + "x \ cnode_offs_range High_cnode_ptr \ \y. length y = 10 \ (x = High_cnode_ptr + of_bl y * 0x20)" + "x \ cnode_offs_range Silc_cnode_ptr \ \y. 
length y = 10 \ (x = Silc_cnode_ptr + of_bl y * 0x20)" + by (simp_all add: cnode_offs_range_correct' s0_ptrs_aligned) + + +text \TCB offset lemmas\ + +lemma tcb_offs_min': + "is_aligned ptr 10 \ (ptr :: obj_ref) \ ptr + ucast (x :: 10 word)" + apply (erule is_aligned_no_wrap') + apply (cut_tac x=x and 'a=64 in ucast_less) + apply simp + apply simp + done + +lemma tcb_offs_min: + "Low_tcb_ptr \ Low_tcb_ptr + ucast (x :: 10 word)" + "High_tcb_ptr \ High_tcb_ptr + ucast (x :: 10 word)" + "idle_tcb_ptr \ idle_tcb_ptr + ucast (x :: 10 word)" + by (simp_all add: tcb_offs_min' s0_ptrs_aligned) + +lemma tcb_offs_max': + "is_aligned ptr 10 \ (ptr :: obj_ref) + ucast (x :: 10 word) \ ptr + 0x3ff" + apply (rule word_plus_mono_right) + apply (rule plus_one_helper) + apply (cut_tac ucast_less[where x=x and 'a=64]) + apply simp + apply simp + apply (drule is_aligned_no_overflow) + apply (simp add: add.commute) + done + +lemma tcb_offs_max: + "Low_tcb_ptr + ucast (x :: 10 word) \ Low_tcb_ptr + 0x3ff" + "High_tcb_ptr + ucast (x :: 10 word) \ High_tcb_ptr + 0x3ff" + "idle_tcb_ptr + ucast (x :: 10 word) \ idle_tcb_ptr + 0x3ff" + by (simp_all add: tcb_offs_max' s0_ptrs_aligned) + +definition tcb_offs_range where + "tcb_offs_range (ptr :: obj_ref) \ {x. ptr \ x \ x \ ptr + 2 ^ 10 - 1}" + +lemma tcb_offs_in_range': + "is_aligned ptr 10 \ ptr + ucast (x :: 10 word) \ tcb_offs_range ptr" + by (clarsimp simp: tcb_offs_min' tcb_offs_max' tcb_offs_range_def add.commute) + +lemma tcb_offs_in_range: + "Low_tcb_ptr + ucast (x :: 10 word) \ tcb_offs_range Low_tcb_ptr" + "High_tcb_ptr + ucast (x :: 10 word) \ tcb_offs_range High_tcb_ptr" + "idle_tcb_ptr + ucast (x :: 10 word) \ tcb_offs_range idle_tcb_ptr" + by (simp_all add: tcb_offs_in_range' s0_ptrs_aligned) + +lemma tcb_offs_range_correct': + "\ x \ tcb_offs_range ptr; is_aligned ptr 10 \ + \ \y. 
x = ptr + ucast (y :: 10 word)" + apply (clarsimp simp: tcb_offs_range_def s0_ptr_defs) + apply (rule_tac x="ucast (x - ptr)" in exI) + apply (clarsimp simp: ucast_ucast_mask) + apply (rule_tac n=10 in mask_eqI) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_def) + apply (simp add: mask_twice) + apply (subst diff_conv_add_uminus) + apply (subst add.commute[symmetric]) + apply (subst mask_add_aligned) + apply (simp add: is_aligned_minus) + apply simp + apply (subst diff_conv_add_uminus) + apply (subst add_mask_lower_bits) + apply (simp add: is_aligned_def) + apply clarsimp + apply (cut_tac x=x and y="ptr + 0x3FF" and n=10 in neg_mask_mono_le) + apply (simp add: add.commute) + apply (drule_tac n=10 in aligned_le_sharp) + apply (simp add: is_aligned_def) + apply (simp add: add.commute) + apply (subst(asm) mask_out_add_aligned[symmetric]) + apply (erule is_aligned_weaken) + apply simp + apply (simp add: mask_def) + done + +lemma tcb_offs_range_correct: + "x \ tcb_offs_range Low_tcb_ptr \ \y. x = Low_tcb_ptr + ucast (y:: 10 word)" + "x \ tcb_offs_range High_tcb_ptr \ \y. x = High_tcb_ptr + ucast (y:: 10 word)" + "x \ tcb_offs_range idle_tcb_ptr \ \y. x = idle_tcb_ptr + ucast (y:: 10 word)" + by (simp_all add: tcb_offs_range_correct' s0_ptrs_aligned) + +lemma caps_dom_length_10: + "Silc_caps x = Some y \ length x = 10" + "High_caps x = Some y \ length x = 10" + "Low_caps x = Some y \ length x = 10" + by (auto simp: Silc_caps_def High_caps_def Low_caps_def the_nat_to_bl_def nat_to_bl_def + split: if_splits) + +lemma dom_caps: + "dom Silc_caps = {x. length x = 10}" + "dom High_caps = {x. length x = 10}" + "dom Low_caps = {x. 
length x = 10}" + by (auto simp: Silc_caps_def High_caps_def Low_caps_def the_nat_to_bl_def nat_to_bl_def dom_def + split: if_split_asm) + +lemmas kh0H_obj_def = + Low_cte_def High_cte_def Silc_cte_def ntfnH_def irq_cte_def Low_pdH_def + High_pdH_def Low_ptH_def High_ptH_def Low_tcbH_def High_tcbH_def idle_tcbH_def + global_ptH_def shared_pageH_def High_poolH_def Low_poolH_def + +lemmas kh0H_all_obj_def = + Low_pd'H_def High_pd'H_def Low_pt'H_def High_pt'H_def global_pteH_def global_pteH'_def + Low_cte'_def Low_capsH_def High_cte'_def High_capsH_def + Silc_cte'_def Silc_capsH_def empty_cte_def kh0H_obj_def + +lemma not_in_range_None: + "x \ cnode_offs_range Low_cnode_ptr \ Low_cte Low_cnode_ptr x = None" + "x \ cnode_offs_range High_cnode_ptr \ High_cte High_cnode_ptr x = None" + "x \ cnode_offs_range Silc_cnode_ptr \ Silc_cte Silc_cnode_ptr x = None" + "x \ pt_offs_range Low_pd_ptr \ Low_pdH Low_pd_ptr x = None" + "x \ pt_offs_range High_pd_ptr \ High_pdH High_pd_ptr x = None" + "x \ pt_offs_range riscv_global_pt_ptr \ global_ptH riscv_global_pt_ptr x = None" + "x \ pt_offs_range Low_pt_ptr \ Low_ptH Low_pt_ptr x = None" + "x \ pt_offs_range High_pt_ptr \ High_ptH High_pt_ptr x = None" + "x \ page_offs_range shared_page_ptr_virt \ shared_pageH shared_page_ptr_virt x = None" + by (auto simp: page_offs_range_def cnode_offs_range_def pt_offs_range_def s0_ptr_defs kh0H_obj_def) + +lemma kh0H_dom_distinct: + "idle_tcb_ptr \ cnode_offs_range Silc_cnode_ptr" + "High_tcb_ptr \ cnode_offs_range Silc_cnode_ptr" + "Low_tcb_ptr \ cnode_offs_range Silc_cnode_ptr" + "High_pool_ptr \ cnode_offs_range Silc_cnode_ptr" + "Low_pool_ptr \ cnode_offs_range Silc_cnode_ptr" + "irq_cnode_ptr \ cnode_offs_range Silc_cnode_ptr" + "ntfn_ptr \ cnode_offs_range Silc_cnode_ptr" + "idle_tcb_ptr \ cnode_offs_range Low_cnode_ptr" + "High_tcb_ptr \ cnode_offs_range Low_cnode_ptr" + "Low_tcb_ptr \ cnode_offs_range Low_cnode_ptr" + "High_pool_ptr \ cnode_offs_range Low_cnode_ptr" + "Low_pool_ptr \ 
cnode_offs_range Low_cnode_ptr" + "irq_cnode_ptr \ cnode_offs_range Low_cnode_ptr" + "ntfn_ptr \ cnode_offs_range Low_cnode_ptr" + "idle_tcb_ptr \ cnode_offs_range High_cnode_ptr" + "High_tcb_ptr \ cnode_offs_range High_cnode_ptr" + "Low_tcb_ptr \ cnode_offs_range High_cnode_ptr" + "High_pool_ptr \ cnode_offs_range High_cnode_ptr" + "Low_pool_ptr \ cnode_offs_range High_cnode_ptr" + "irq_cnode_ptr \ cnode_offs_range High_cnode_ptr" + "ntfn_ptr \ cnode_offs_range High_cnode_ptr" + "idle_tcb_ptr \ pt_offs_range Low_pd_ptr" + "High_tcb_ptr \ pt_offs_range Low_pd_ptr" + "Low_tcb_ptr \ pt_offs_range Low_pd_ptr" + "High_pool_ptr \ pt_offs_range Low_pd_ptr" + "Low_pool_ptr \ pt_offs_range Low_pd_ptr" + "irq_cnode_ptr \ pt_offs_range Low_pd_ptr" + "ntfn_ptr \ pt_offs_range Low_pd_ptr" + "idle_tcb_ptr \ pt_offs_range High_pd_ptr" + "High_tcb_ptr \ pt_offs_range High_pd_ptr" + "Low_tcb_ptr \ pt_offs_range High_pd_ptr" + "High_pool_ptr \ pt_offs_range High_pd_ptr" + "Low_pool_ptr \ pt_offs_range High_pd_ptr" + "irq_cnode_ptr \ pt_offs_range High_pd_ptr" + "ntfn_ptr \ pt_offs_range High_pd_ptr" + "idle_tcb_ptr \ pt_offs_range riscv_global_pt_ptr" + "High_tcb_ptr \ pt_offs_range riscv_global_pt_ptr" + "Low_tcb_ptr \ pt_offs_range riscv_global_pt_ptr" + "High_pool_ptr \ pt_offs_range riscv_global_pt_ptr" + "Low_pool_ptr \ pt_offs_range riscv_global_pt_ptr" + "irq_cnode_ptr \ pt_offs_range riscv_global_pt_ptr" + "ntfn_ptr \ pt_offs_range riscv_global_pt_ptr" + "idle_tcb_ptr \ pt_offs_range Low_pt_ptr" + "High_tcb_ptr \ pt_offs_range Low_pt_ptr" + "Low_tcb_ptr \ pt_offs_range Low_pt_ptr" + "High_pool_ptr \ pt_offs_range Low_pt_ptr" + "Low_pool_ptr \ pt_offs_range Low_pt_ptr" + "irq_cnode_ptr \ pt_offs_range Low_pt_ptr" + "ntfn_ptr \ pt_offs_range Low_pt_ptr" + "idle_tcb_ptr \ pt_offs_range High_pt_ptr" + "High_tcb_ptr \ pt_offs_range High_pt_ptr" + "Low_tcb_ptr \ pt_offs_range High_pt_ptr" + "High_pool_ptr \ pt_offs_range High_pt_ptr" + "Low_pool_ptr \ pt_offs_range High_pt_ptr" + 
"irq_cnode_ptr \ pt_offs_range High_pt_ptr" + "ntfn_ptr \ pt_offs_range High_pt_ptr" + "idle_tcb_ptr \ tcb_offs_range Low_tcb_ptr" + "High_tcb_ptr \ tcb_offs_range Low_tcb_ptr" + "High_pool_ptr \ tcb_offs_range Low_tcb_ptr" + "Low_pool_ptr \ tcb_offs_range Low_tcb_ptr" + "irq_cnode_ptr \ tcb_offs_range Low_tcb_ptr" + "ntfn_ptr \ tcb_offs_range Low_tcb_ptr" + "idle_tcb_ptr \ tcb_offs_range High_tcb_ptr" + "Low_tcb_ptr \ tcb_offs_range High_tcb_ptr" + "High_pool_ptr \ tcb_offs_range High_tcb_ptr" + "Low_pool_ptr \ tcb_offs_range High_tcb_ptr" + "irq_cnode_ptr \ tcb_offs_range High_tcb_ptr" + "ntfn_ptr \ tcb_offs_range High_tcb_ptr" + "High_tcb_ptr \ tcb_offs_range idle_tcb_ptr" + "Low_tcb_ptr \ tcb_offs_range idle_tcb_ptr" + "High_pool_ptr \ tcb_offs_range idle_tcb_ptr" + "Low_pool_ptr \ tcb_offs_range idle_tcb_ptr" + "irq_cnode_ptr \ tcb_offs_range idle_tcb_ptr" + "ntfn_ptr \ tcb_offs_range idle_tcb_ptr" + "idle_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "High_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "Low_tcb_ptr \ page_offs_range shared_page_ptr_virt" + "High_pool_ptr \ page_offs_range shared_page_ptr_virt" + "Low_pool_ptr \ page_offs_range shared_page_ptr_virt" + "irq_cnode_ptr \ page_offs_range shared_page_ptr_virt" + "ntfn_ptr \ page_offs_range shared_page_ptr_virt" + by (auto simp: tcb_offs_range_def pt_offs_range_def page_offs_range_def + cnode_offs_range_def kh0H_obj_def s0_ptr_defs) + +lemma kh0H_dom_sets_distinct: + "irq_node_offs_range \ cnode_offs_range Silc_cnode_ptr = {}" + "irq_node_offs_range \ cnode_offs_range High_cnode_ptr = {}" + "irq_node_offs_range \ cnode_offs_range Low_cnode_ptr = {}" + "irq_node_offs_range \ pt_offs_range riscv_global_pt_ptr = {}" + "irq_node_offs_range \ pt_offs_range High_pd_ptr = {}" + "irq_node_offs_range \ pt_offs_range Low_pd_ptr = {}" + "irq_node_offs_range \ pt_offs_range High_pt_ptr = {}" + "irq_node_offs_range \ pt_offs_range Low_pt_ptr = {}" + "irq_node_offs_range \ tcb_offs_range High_tcb_ptr = {}" 
+ "irq_node_offs_range \ tcb_offs_range Low_tcb_ptr = {}" + "irq_node_offs_range \ tcb_offs_range idle_tcb_ptr = {}" + "irq_node_offs_range \ page_offs_range shared_page_ptr_virt = {}" + "cnode_offs_range Silc_cnode_ptr \ cnode_offs_range High_cnode_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ cnode_offs_range Low_cnode_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ pt_offs_range High_pd_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ pt_offs_range Low_pd_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ pt_offs_range High_pt_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ pt_offs_range Low_pt_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "cnode_offs_range Silc_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" + "cnode_offs_range High_cnode_ptr \ cnode_offs_range Low_cnode_ptr = {}" + "cnode_offs_range High_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" + "cnode_offs_range High_cnode_ptr \ pt_offs_range High_pd_ptr = {}" + "cnode_offs_range High_cnode_ptr \ pt_offs_range Low_pd_ptr = {}" + "cnode_offs_range High_cnode_ptr \ pt_offs_range High_pt_ptr = {}" + "cnode_offs_range High_cnode_ptr \ pt_offs_range Low_pt_ptr = {}" + "cnode_offs_range High_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" + "cnode_offs_range High_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "cnode_offs_range High_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "cnode_offs_range High_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" + "cnode_offs_range Low_cnode_ptr \ pt_offs_range riscv_global_pt_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ pt_offs_range High_pd_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ pt_offs_range Low_pd_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ pt_offs_range High_pt_ptr = {}" + "cnode_offs_range 
Low_cnode_ptr \ pt_offs_range Low_pt_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ tcb_offs_range High_tcb_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "cnode_offs_range Low_cnode_ptr \ page_offs_range shared_page_ptr_virt = {}" + "pt_offs_range riscv_global_pt_ptr \ pt_offs_range High_pd_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ pt_offs_range Low_pd_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ pt_offs_range High_pt_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ pt_offs_range Low_pt_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "pt_offs_range riscv_global_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" + "pt_offs_range High_pd_ptr \ pt_offs_range Low_pd_ptr = {}" + "pt_offs_range High_pd_ptr \ pt_offs_range High_pt_ptr = {}" + "pt_offs_range High_pd_ptr \ pt_offs_range Low_pt_ptr = {}" + "pt_offs_range High_pd_ptr \ tcb_offs_range High_tcb_ptr = {}" + "pt_offs_range High_pd_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "pt_offs_range High_pd_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "pt_offs_range High_pd_ptr \ page_offs_range shared_page_ptr_virt = {}" + "pt_offs_range Low_pd_ptr \ pt_offs_range High_pt_ptr = {}" + "pt_offs_range Low_pd_ptr \ pt_offs_range Low_pt_ptr = {}" + "pt_offs_range Low_pd_ptr \ tcb_offs_range High_tcb_ptr = {}" + "pt_offs_range Low_pd_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "pt_offs_range Low_pd_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "pt_offs_range Low_pd_ptr \ page_offs_range shared_page_ptr_virt = {}" + "pt_offs_range High_pt_ptr \ pt_offs_range Low_pt_ptr = {}" + "pt_offs_range High_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" + "pt_offs_range High_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "pt_offs_range High_pt_ptr \ tcb_offs_range 
idle_tcb_ptr = {}" + "pt_offs_range High_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" + "pt_offs_range Low_pt_ptr \ tcb_offs_range High_tcb_ptr = {}" + "pt_offs_range Low_pt_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "pt_offs_range Low_pt_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "pt_offs_range Low_pt_ptr \ page_offs_range shared_page_ptr_virt = {}" + "tcb_offs_range High_tcb_ptr \ tcb_offs_range Low_tcb_ptr = {}" + "tcb_offs_range High_tcb_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "tcb_offs_range High_tcb_ptr \ page_offs_range shared_page_ptr_virt = {}" + "tcb_offs_range Low_tcb_ptr \ tcb_offs_range idle_tcb_ptr = {}" + "tcb_offs_range Low_tcb_ptr \ page_offs_range shared_page_ptr_virt = {}" + "page_offs_range shared_page_ptr_virt \ tcb_offs_range idle_tcb_ptr = {}" + by (rule disjointI, clarsimp simp: tcb_offs_range_def pt_offs_range_def page_offs_range_def + irq_node_offs_range_def cnode_offs_range_def s0_ptr_defs + , drule (1) order_trans le_less_trans, fastforce)+ + +lemmas offs_in_range = + pt_offs_in_range page_offs_in_range tcb_offs_in_range cnode_offs_in_range irq_node_offs_in_range + +lemmas offs_range_correct = + pt_offs_range_correct page_offs_range_correct tcb_offs_range_correct + cnode_offs_range_correct irq_node_offs_range_correct + +lemma kh0H_dom_distinct': + fixes y :: pt_index + shows + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ idle_tcb_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ High_tcb_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ Low_tcb_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ High_pool_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ Low_pool_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ irq_cnode_ptr" + "length x = 10 \ Silc_cnode_ptr + of_bl x * 0x20 \ ntfn_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ idle_tcb_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ High_tcb_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ High_pool_ptr" + 
"length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ Low_pool_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ Low_tcb_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ irq_cnode_ptr" + "length x = 10 \ Low_cnode_ptr + of_bl x * 0x20 \ ntfn_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ idle_tcb_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ High_tcb_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ Low_tcb_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ High_pool_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ Low_pool_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ irq_cnode_ptr" + "length x = 10 \ High_cnode_ptr + of_bl x * 0x20 \ ntfn_ptr" + "Low_pd_ptr + (ucast y << 3) \ idle_tcb_ptr" + "Low_pd_ptr + (ucast y << 3) \ High_tcb_ptr" + "Low_pd_ptr + (ucast y << 3) \ Low_tcb_ptr" + "Low_pd_ptr + (ucast y << 3) \ High_pool_ptr" + "Low_pd_ptr + (ucast y << 3) \ Low_pool_ptr" + "Low_pd_ptr + (ucast y << 3) \ irq_cnode_ptr" + "Low_pd_ptr + (ucast y << 3) \ ntfn_ptr" + "High_pd_ptr + (ucast y << 3) \ idle_tcb_ptr" + "High_pd_ptr + (ucast y << 3) \ High_tcb_ptr" + "High_pd_ptr + (ucast y << 3) \ Low_tcb_ptr" + "High_pd_ptr + (ucast y << 3) \ High_pool_ptr" + "High_pd_ptr + (ucast y << 3) \ Low_pool_ptr" + "High_pd_ptr + (ucast y << 3) \ irq_cnode_ptr" + "High_pd_ptr + (ucast y << 3) \ ntfn_ptr" + "Low_pt_ptr + (ucast y << 3) \ idle_tcb_ptr" + "Low_pt_ptr + (ucast y << 3) \ High_tcb_ptr" + "Low_pt_ptr + (ucast y << 3) \ Low_tcb_ptr" + "Low_pt_ptr + (ucast y << 3) \ High_pool_ptr" + "Low_pt_ptr + (ucast y << 3) \ Low_pool_ptr" + "Low_pt_ptr + (ucast y << 3) \ irq_cnode_ptr" + "Low_pt_ptr + (ucast y << 3) \ ntfn_ptr" + "High_pt_ptr + (ucast y << 3) \ idle_tcb_ptr" + "High_pt_ptr + (ucast y << 3) \ High_tcb_ptr" + "High_pt_ptr + (ucast y << 3) \ Low_tcb_ptr" + "High_pt_ptr + (ucast y << 3) \ High_pool_ptr" + "High_pt_ptr + (ucast y << 3) \ Low_pool_ptr" + "High_pt_ptr + (ucast y << 3) \ irq_cnode_ptr" + 
"High_pt_ptr + (ucast y << 3) \ ntfn_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ idle_tcb_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ High_tcb_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ Low_tcb_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ High_pool_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ Low_pool_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ irq_cnode_ptr" + "riscv_global_pt_ptr + (ucast y << 3) \ ntfn_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ idle_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ High_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ Low_tcb_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ High_pool_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ Low_pool_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ irq_cnode_ptr" + "shared_page_ptr_virt + (ucast y << 12) \ ntfn_ptr" + apply (drule offs_in_range, fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(1), fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(2), fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(3), fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(4), fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(5), fastforce simp: kh0H_dom_distinct)+ + apply (cut_tac x=y in offs_in_range(6), fastforce simp: kh0H_dom_distinct)+ + done + +lemma not_disjointI: + "\ x = y; x \ A; y \ B \ \ A \ B \ {}" + by fastforce + +lemma shared_pageH_KOUserData[simp]: + "shared_pageH shared_page_ptr_virt (shared_page_ptr_virt + (UCAST(9 \ 64) y << 12)) = Some KOUserData" + apply (clarsimp simp: shared_pageH_def page_offs_min page_offs_max add.commute) + apply (cut_tac shared_page_ptr_is_aligned) + apply (clarsimp simp: is_aligned_mask mask_def s0_ptr_defs bit_simps) + apply word_bitwise + done + +lemma kh0H_simps[simp]: + fixes y :: pt_index + shows + "kh0H (init_irq_node_ptr + (ucast (irq :: irq) << 5)) = Some (KOCTE (CTE NullCap Null_mdb))" + "kh0H ntfn_ptr = 
Some (KONotification ntfnH)" + "kh0H irq_cnode_ptr = Some (KOCTE irq_cte)" + "kh0H Low_pool_ptr = Some (KOArch Low_poolH)" + "kh0H High_pool_ptr = Some (KOArch High_poolH)" + "kh0H Low_tcb_ptr = Some (KOTCB Low_tcbH)" + "kh0H High_tcb_ptr = Some (KOTCB High_tcbH)" + "kh0H idle_tcb_ptr = Some (KOTCB idle_tcbH)" + "length x = 10 \ kh0H (Low_cnode_ptr + of_bl x * 0x20) = Low_cte Low_cnode_ptr (Low_cnode_ptr + of_bl x * 0x20)" + "length x = 10 \ kh0H (High_cnode_ptr + of_bl x * 0x20) = High_cte High_cnode_ptr (High_cnode_ptr + of_bl x * 0x20)" + "length x = 10 \ kh0H (Silc_cnode_ptr + of_bl x * 0x20) = Silc_cte Silc_cnode_ptr (Silc_cnode_ptr + of_bl x * 0x20)" + "kh0H (Low_pd_ptr + (ucast y << 3)) = Low_pdH Low_pd_ptr (Low_pd_ptr + (ucast y << 3))" + "kh0H (High_pd_ptr + (ucast y << 3)) = High_pdH High_pd_ptr (High_pd_ptr + (ucast y << 3))" + "kh0H (Low_pt_ptr + (ucast y << 3)) = Low_ptH Low_pt_ptr (Low_pt_ptr + (ucast y << 3))" + "kh0H (High_pt_ptr + (ucast y << 3)) = High_ptH High_pt_ptr (High_pt_ptr + (ucast y << 3))" + "kh0H (riscv_global_pt_ptr + (ucast y << 3)) = global_ptH riscv_global_pt_ptr (riscv_global_pt_ptr + (ucast y << 3))" + "kh0H (shared_page_ptr_virt + (ucast y << 12)) = Some KOUserData" + supply option.case_cong[cong] + apply (fastforce simp: kh0H_def option_update_range_def) + by ((clarsimp simp: kh0H_def kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_None offs_in_range + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_None + | rule conjI | clarsimp split: option.splits)+, + drule not_disjointI, + (erule offs_in_range | rule offs_in_range), + (erule offs_in_range | rule offs_in_range), + erule notE, rule kh0H_dom_sets_distinct)+ + +lemma kh0H_dom: + "dom kh0H = {idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, + High_pool_ptr, Low_pool_ptr, irq_cnode_ptr, ntfn_ptr} \ + irq_node_offs_range \ + page_offs_range 
shared_page_ptr_virt \ + cnode_offs_range Silc_cnode_ptr \ + cnode_offs_range High_cnode_ptr \ + cnode_offs_range Low_cnode_ptr \ + pt_offs_range riscv_global_pt_ptr \ + pt_offs_range High_pd_ptr \ + pt_offs_range Low_pd_ptr \ + pt_offs_range High_pt_ptr \ + pt_offs_range Low_pt_ptr" + apply (rule equalityI) + apply (simp add: kh0H_def dom_def) + apply (clarsimp simp: offs_in_range option_update_range_def not_in_range_None split: if_split_asm) + apply (clarsimp simp: dom_def) + apply (rule conjI) + apply (force simp: kh0H_def kh0H_dom_distinct option_update_range_def not_in_range_None + dest: irq_node_offs_range_correct split: option.splits) + by (rule conjI + | clarsimp simp: kh0H_def kh0H_dom_distinct option_update_range_def not_in_range_None + split: option.splits + , frule offs_range_correct + , clarsimp simp: kh0H_all_obj_def cnode_offs_range_def page_offs_range_def pt_offs_range_def + split: if_split_asm)+ + +lemmas kh0H_SomeD' = set_mp[OF equalityD1[OF kh0H_dom[simplified dom_def]], OF CollectI, simplified, OF exI] + +lemma kh0H_SomeD: + "kh0H x = Some y + \ x = ntfn_ptr \ y = KONotification ntfnH \ + x = Low_tcb_ptr \ y = KOTCB Low_tcbH \ + x = High_tcb_ptr \ y = KOTCB High_tcbH \ + x = idle_tcb_ptr \ y = KOTCB idle_tcbH \ + x \ pt_offs_range riscv_global_pt_ptr \ global_ptH riscv_global_pt_ptr x \ None + \ y = the (global_ptH riscv_global_pt_ptr x) \ + x \ irq_node_offs_range \ y = KOCTE (CTE NullCap Null_mdb) \ + x \ pt_offs_range Low_pt_ptr \ Low_ptH Low_pt_ptr x \ None \ y = the (Low_ptH Low_pt_ptr x) \ + x \ pt_offs_range High_pt_ptr \ High_ptH High_pt_ptr x \ None \ y = the (High_ptH High_pt_ptr x) \ + x \ pt_offs_range Low_pd_ptr \ Low_pdH Low_pd_ptr x \ None \ y = the (Low_pdH Low_pd_ptr x) \ + x \ pt_offs_range High_pd_ptr \ High_pdH High_pd_ptr x \ None \ y = the (High_pdH High_pd_ptr x) \ + x = Low_pool_ptr \ y = KOArch Low_poolH \ + x = High_pool_ptr \ y = KOArch High_poolH \ + x \ cnode_offs_range Low_cnode_ptr \ Low_cte Low_cnode_ptr x \ None 
\ y = the (Low_cte Low_cnode_ptr x) \ + x \ cnode_offs_range High_cnode_ptr \ High_cte High_cnode_ptr x \ None \ y = the (High_cte High_cnode_ptr x) \ + x \ cnode_offs_range Silc_cnode_ptr \ Silc_cte Silc_cnode_ptr x \ None \ y = the (Silc_cte Silc_cnode_ptr x) \ + x = irq_cnode_ptr \ y = KOCTE irq_cte \ + x \ page_offs_range shared_page_ptr_virt \ y = KOUserData" + apply (frule kh0H_SomeD') + apply (elim disjE) + by ((clarsimp | drule offs_range_correct)+) + + +definition arch_state0H :: Arch.kernel_state where + "arch_state0H \ + RISCVKernelState [ucast (asid_high_bits_of Low_asid) \ Low_pool_ptr, + ucast (asid_high_bits_of High_asid) \ High_pool_ptr] + (\level. if level = maxPTLevel then [riscv_global_pt_ptr] else []) + init_vspace_uses" + +definition s0H_internal :: "kernel_state" where + "s0H_internal \ \ + ksPSpace = kh0H, + gsUserPages = [shared_page_ptr_virt \ RISCVLargePage], + gsCNodes = (\x. if \irq :: irq. init_irq_node_ptr + (ucast irq << 5) = x + then Some 0 else None) + (Low_cnode_ptr \ 10, + High_cnode_ptr \ 10, + Silc_cnode_ptr \ 10, + irq_cnode_ptr \ 0), + gsUntypedZeroRanges = ran (map_comp untypedZeroRange (option_map cteCap o map_to_ctes kh0H)), + gsMaxObjectSize = card (UNIV :: obj_ref set), + ksDomScheduleIdx = 0, + ksDomSchedule = [(0, 10), (1, 10)], + ksCurDomain = 0, + ksDomainTime = 5, + ksReadyQueues = const (TcbQueue None None), + ksReadyQueuesL1Bitmap = const 0, + ksReadyQueuesL2Bitmap = const 0, + ksCurThread = Low_tcb_ptr, + ksIdleThread = idle_tcb_ptr, + ksSchedulerAction = ResumeCurrentThread, + ksInterruptState = InterruptState init_irq_node_ptr ((\_. IRQInactive) (timer_irq := IRQTimer)), + ksWorkUnitsCompleted = undefined, + ksArchState = arch_state0H, + ksMachineState = machine_state0\" + + +definition Low_cte_cte :: "obj_ref \ obj_ref \ cte option" where + "Low_cte_cte \ \base offs. 
if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then Low_cte' (ucast (offs - base >> 5)) else None" + +definition High_cte_cte :: "obj_ref \ obj_ref \ cte option" where + "High_cte_cte \ \base offs. if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then High_cte' (ucast (offs - base >> 5)) else None" + +definition Silc_cte_cte :: "obj_ref \ obj_ref \ cte option" where + "Silc_cte_cte \ \base offs. if is_aligned offs 5 \ base \ offs \ offs \ base + 2 ^ 15 - 1 + then Silc_cte' (ucast (offs - base >> 5)) else None" + +definition Low_tcb_cte :: "obj_ref \ cte option" where + "Low_tcb_cte \ [Low_tcb_ptr \ tcbCTable Low_tcbH, + Low_tcb_ptr + 0x20 \ tcbVTable Low_tcbH, + Low_tcb_ptr + 0x40 \ tcbReply Low_tcbH, + Low_tcb_ptr + 0x60 \ tcbCaller Low_tcbH, + Low_tcb_ptr + 0x80 \ tcbIPCBufferFrame Low_tcbH]" + +definition High_tcb_cte :: "obj_ref \ cte option" where + "High_tcb_cte \ [High_tcb_ptr \ tcbCTable High_tcbH, + High_tcb_ptr + 0x20 \ tcbVTable High_tcbH, + High_tcb_ptr + 0x40 \ tcbReply High_tcbH, + High_tcb_ptr + 0x60 \ tcbCaller High_tcbH, + High_tcb_ptr + 0x80 \ tcbIPCBufferFrame High_tcbH]" + +definition idle_tcb_cte :: "obj_ref \ cte option" where + "idle_tcb_cte \ [idle_tcb_ptr \ tcbCTable idle_tcbH, + idle_tcb_ptr + 0x20 \ tcbVTable idle_tcbH, + idle_tcb_ptr + 0x40 \ tcbReply idle_tcbH, + idle_tcb_ptr + 0x60 \ tcbCaller idle_tcbH, + idle_tcb_ptr + 0x80 \ tcbIPCBufferFrame idle_tcbH]" + + +lemma not_in_range_cte_None: + "x \ cnode_offs_range Low_cnode_ptr \ Low_cte_cte Low_cnode_ptr x = None" + "x \ cnode_offs_range High_cnode_ptr \ High_cte_cte High_cnode_ptr x = None" + "x \ cnode_offs_range Silc_cnode_ptr \ Silc_cte_cte Silc_cnode_ptr x = None" + "x \ tcb_offs_range Low_tcb_ptr \ Low_tcb_cte x = None" + "x \ tcb_offs_range High_tcb_ptr \ High_tcb_cte x = None" + "x \ tcb_offs_range idle_tcb_ptr \ idle_tcb_cte x = None" + by (fastforce simp: cnode_offs_range_def page_offs_range_def tcb_offs_range_def s0_ptr_defs Low_cte_cte_def + 
High_cte_cte_def Silc_cte_cte_def Low_tcb_cte_def High_tcb_cte_def idle_tcb_cte_def)+ + +lemma mask_neg_le: + "x && ~~ mask n \ x" + apply (clarsimp simp: neg_mask_is_div) + apply (rule word_div_mult_le) + done + +lemma mask_in_tcb_offs_range: + "x && ~~ mask 10 = ptr \ x \ tcb_offs_range ptr" + apply (clarsimp simp: tcb_offs_range_def mask_neg_le objBitsKO_def) + apply (cut_tac and_neg_mask_plus_mask_mono[where p=x and n=10]) + apply (simp add: add.commute mask_def) + done + +lemma set_mem_neq: + "\ y \ S; x \ S \ \ x \ y" + by fastforce + +lemma neg_mask_decompose: + "x && ~~ mask n = ptr \ x = ptr + (x && mask n)" + by (clarsimp simp: AND_NOT_mask_plus_AND_mask_eq) + +lemma opt_None_not_dom: + "m a = None \ a \ dom m" + by (simp add: dom_def) + +lemma tcb_offs_range_mask_eq: + "\ x \ tcb_offs_range ptr; is_aligned ptr 10 \ \ x && ~~ mask 10 = ptr" + apply (drule(1) tcb_offs_range_correct') + apply (clarsimp simp: objBitsKO_def) + apply (drule_tac d="ucast y" in is_aligned_add_helper) + apply (cut_tac x=y and 'a=64 in ucast_less) + apply simp + apply simp + apply simp + done + +lemma not_in_tcb_offs: + "\tcb. kh0H (x && ~~ mask 10) \ Some (KOTCB tcb) + \ x \ tcb_offs_range Low_tcb_ptr" + "\tcb. kh0H (x && ~~ mask 10) \ Some (KOTCB tcb) + \ x \ tcb_offs_range High_tcb_ptr" + "\tcb. 
kh0H (x && ~~ mask 10) \ Some (KOTCB tcb) + \ x \ tcb_offs_range idle_tcb_ptr" + by (fastforce simp: s0_ptrs_aligned dest: tcb_offs_range_mask_eq)+ + +lemma range_tcb_not_kh0H_dom: + "{(x && ~~ mask 10) + 1..(x && ~~ mask 10) + 2 ^ 10 - 1} \ dom kh0H \ {} \ (x && ~~ mask 10) \ High_tcb_ptr" + "{(x && ~~ mask 10) + 1..(x && ~~ mask 10) + 2 ^ 10 - 1} \ dom kh0H \ {} \ (x && ~~ mask 10) \ Low_tcb_ptr" + "{(x && ~~ mask 10) + 1..(x && ~~ mask 10) + 2 ^ 10 - 1} \ dom kh0H \ {} \ (x && ~~ mask 10) \ idle_tcb_ptr" + apply clarsimp + apply (drule int_not_emptyD) + apply (clarsimp simp: kh0H_dom) + apply (subgoal_tac "xa \ tcb_offs_range High_tcb_ptr") + prefer 2 + apply (clarsimp simp: tcb_offs_range_def objBitsKO_def) + apply (rule_tac y="High_tcb_ptr + 1" in order_trans) + apply (simp add: s0_ptr_defs) + apply simp + apply (clarsimp simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply ((clarsimp simp: kh0H_dom_sets_distinct[THEN orthD2] | + clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1])+)[1] + apply (clarsimp simp: s0_ptr_defs tcb_offs_range_def) + apply clarsimp + apply (drule int_not_emptyD) + apply (clarsimp simp: kh0H_dom) + apply (subgoal_tac "xa \ tcb_offs_range Low_tcb_ptr") + prefer 2 + apply (clarsimp simp: tcb_offs_range_def objBitsKO_def) + apply (rule_tac y="Low_tcb_ptr + 1" in order_trans) + apply (simp add: s0_ptr_defs) + apply simp + apply (clarsimp simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply ((clarsimp simp: kh0H_dom_sets_distinct[THEN orthD2] | + clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1])+)[1] + apply (clarsimp simp: s0_ptr_defs tcb_offs_range_def) + apply clarsimp + apply (drule int_not_emptyD) + apply (clarsimp simp: kh0H_dom) + apply (subgoal_tac "xa \ tcb_offs_range idle_tcb_ptr") + prefer 2 + apply (clarsimp simp: tcb_offs_range_def objBitsKO_def) + apply (rule_tac y="idle_tcb_ptr + 1" in order_trans) + apply (simp add: s0_ptr_defs) + apply simp + apply (clarsimp simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply ((clarsimp simp: 
kh0H_dom_sets_distinct[THEN orthD2] | + clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1])+)[1] + apply (clarsimp simp: s0_ptr_defs tcb_offs_range_def) + done + +lemma kh0H_dom_tcb: + "kh0H x = Some (KOTCB tcb) + \ x = Low_tcb_ptr \ x = High_tcb_ptr \ x = idle_tcb_ptr" + apply (frule domI[where m="kh0H"]) + apply (simp add: kh0H_dom) + apply (elim disjE) + by (auto dest: offs_range_correct simp: kh0H_all_obj_def s0_ptrs_aligned split: if_split_asm) + +lemma map_to_ctes_kh0H: + "map_to_ctes kh0H = + (option_update_range + (\x. if \irq :: irq. init_irq_node_ptr + (ucast irq << 5) = x + then Some (CTE NullCap Null_mdb) else None) \ + option_update_range (Low_cte_cte Low_cnode_ptr) \ + option_update_range (High_cte_cte High_cnode_ptr) \ + option_update_range (Silc_cte_cte Silc_cnode_ptr) \ + option_update_range [irq_cnode_ptr \ CTE NullCap Null_mdb] \ + option_update_range Low_tcb_cte \ + option_update_range High_tcb_cte \ + option_update_range idle_tcb_cte + ) Map.empty" + supply option.case_cong[cong] if_cong[cong] + supply objBits_defs[simp] + apply (rule ext) + apply (case_tac "kh0H x") + apply (clarsimp simp add: map_to_ctes_def Let_def objBitsKO_def) + apply (rule conjI) + apply clarsimp + apply (thin_tac "x \ y = {}" for x y) + apply (frule kh0H_dom_tcb) + apply (elim disjE) + apply (clarsimp simp: option_update_range_def) + apply (frule mask_in_tcb_offs_range) + apply (clarsimp simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply (simp add: kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None + | simp add: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None)+ + apply (rule conjI, clarsimp) + apply (clarsimp split: option.splits) + apply (rule conjI) + apply (fastforce simp: tcb_cte_cases_def Low_tcb_cte_def dest: neg_mask_decompose) + subgoal by (fastforce simp: Low_tcb_cte_def tcb_cte_cases_def + split: if_split_asm dest: neg_mask_decompose) + apply (clarsimp simp: option_update_range_def) + apply (frule mask_in_tcb_offs_range) + apply (clarsimp 
simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply (simp add: kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None + | simp add: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None)+ + apply (rule conjI, clarsimp) + apply clarsimp + apply (clarsimp split: option.splits) + apply (rule conjI) + apply (fastforce simp: tcb_cte_cases_def High_tcb_cte_def dest: neg_mask_decompose) + subgoal by (fastforce simp: High_tcb_cte_def tcb_cte_cases_def + split: if_split_asm dest: neg_mask_decompose) + apply (clarsimp simp: option_update_range_def) + apply (frule mask_in_tcb_offs_range) + apply (clarsimp simp: kh0H_dom_distinct[THEN set_mem_neq]) + apply (simp add: kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None + | simp add: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None)+ + apply (rule conjI, clarsimp) + apply clarsimp + apply (clarsimp split: option.splits) + apply (rule conjI) + apply (fastforce simp: tcb_cte_cases_def idle_tcb_cte_def dest: neg_mask_decompose) + subgoal by (fastforce simp: idle_tcb_cte_def tcb_cte_cases_def + split: if_split_asm dest: neg_mask_decompose) + apply (drule_tac m="kh0H" in opt_None_not_dom) + apply (rule conjI) + apply (clarsimp simp: kh0H_dom option_update_range_def) + apply ((clarsimp simp: kh0H_dom_sets_distinct[THEN orthD2] not_in_tcb_offs not_in_range_cte_None offs_in_range + | clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None)+)[1] + apply (rule impI) + apply (frule range_tcb_not_kh0H_dom(1)[simplified]) + apply (frule range_tcb_not_kh0H_dom(2)[simplified]) + apply (drule range_tcb_not_kh0H_dom(3)[simplified]) + apply (clarsimp simp: kh0H_dom split del: if_split) + apply (clarsimp simp: option_update_range_def) + apply ((clarsimp simp: kh0H_dom_sets_distinct[THEN orthD2] not_in_tcb_offs not_in_range_cte_None offs_in_range + | clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None)+)[1] + apply (subst not_in_range_cte_None, + clarsimp simp: tcb_offs_range_mask_eq 
s0_ptrs_aligned)+ + apply (clarsimp simp: irq_node_offs_in_range) + apply (frule kh0H_SomeD) + apply (elim disjE) + defer + apply ((clarsimp simp: map_to_ctes_def Let_def split del: if_split, + subst if_split_eq1, rule conjI, rule impI, + (subst is_aligned_neg_mask_eq, simp add: is_aligned_def s0_ptr_defs objBitsKO_def)+, + ((clarsimp simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None | + clarsimp simp: idle_tcb_cte_def High_tcb_cte_def Low_tcb_cte_def)+)[1], + rule impI, + (subst (asm) is_aligned_neg_mask_eq, simp add: is_aligned_def s0_ptr_defs objBitsKO_def)+, + clarsimp, + clarsimp simp: kh0H_dom objBitsKO_def s0_ptr_defs irq_node_offs_range_def + cnode_offs_range_def pt_offs_range_def page_offs_range_def, + rule FalseE, drule int_not_emptyD, clarsimp, + (elim disjE, (clarsimp | drule(1) order_trans le_less_trans, fastforce)+)[1])+)[3] + defer 2 + apply ((clarsimp simp: map_to_ctes_def Let_def split del: if_split, + subst if_split_eq1, rule conjI, rule impI, drule pt_offs_range_correct, + clarsimp simp: kh0H_obj_def kh0H_dom_distinct option_update_range_def not_in_range_cte_None, + rule impI, subst if_split_eq1, rule conjI, rule impI, rule FalseE, + drule pt_offs_range_correct, clarsimp, cut_tac x=ya and 'a=64 in ucast_less, + simp, drule shiftl_less_t2n'[where n=3], simp, simp, + drule plus_one_helper[where n="0xFFF", simplified], drule kh0H_dom_tcb, + (elim disjE, (clarsimp simp: s0_ptr_defs objBitsKO_def, + erule notE[rotated], + rule_tac a="x::obj_ref" for x in dual_order.strict_implies_not_eq, + rule less_le_trans[rotated, OF aligned_le_sharp], + erule word_plus_mono_right2[rotated], + simp, simp add: is_aligned_def, simp)+)[1], + rule impI, + clarsimp simp: option_update_range_def kh0H_dom_distinct[THEN set_mem_neq] not_in_range_cte_None, + ((clarsimp simp: kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None irq_node_offs_in_range | + clarsimp simp: kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1])+)[5] + prefer 8 + 
apply ((clarsimp simp: map_to_ctes_def Let_def kh0H_obj_def split del: if_split, + subst if_split_eq1, rule conjI, + clarsimp, drule kh0H_dom_tcb, fastforce simp: s0_ptr_defs mask_def objBitsKO_def, + fastforce simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None)+)[3] + apply ((clarsimp simp: map_to_ctes_def Let_def kh0H_obj_def objBitsKO_def + split: if_split_asm split del: if_split, + subst if_split_eq1, rule conjI, rule impI, + clarsimp simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None, + (clarsimp simp: option_update_range_def kh0H_dom_distinct[THEN set_mem_neq] + kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+, + rule conjI, clarsimp, drule offs_range_correct, + fastforce simp: Low_cte_cte_def High_cte_cte_def Silc_cte_cte_def, + rule impI, rule FalseE, drule offs_range_correct, + clarsimp simp: Low_cte_def High_cte_def Silc_cte_def cnode_offs_min cnode_offs_max, + cut_tac x="of_bl y" and z="0x20::obj_ref" and y="2 ^ 15 - 1" in div_to_mult_word_lt, + frule_tac 'a=64 in of_bl_length_le, simp, simp, drule int_not_emptyD, + clarsimp simp: kh0H_dom s0_ptr_defs cnode_offs_range_def page_offs_range_def + pt_offs_range_def irq_node_offs_range_def, + (elim disjE, (clarsimp simp: s0_ptr_defs, + drule_tac b="x + y * 0x20" and n=5 for x y in aligned_le_sharp, + fastforce simp: is_aligned_def, clarsimp simp: add.commute, + subst (asm) mask_out_add_aligned[symmetric], + simp add: is_aligned_mult_triv2[where n=5, simplified], + simp add: mask_def, drule word_leq_le_minus_one, + subst add.commute, rule neq_0_no_wrap, + erule word_plus_mono_right2[rotated], + fastforce, fastforce, fastforce simp: add.commute | unat_arith)+)[1])+)[3] + apply (clarsimp simp: map_to_ctes_def Let_def kh0H_obj_def split del: if_split, + subst if_split_eq1, rule conjI, rule impI, + clarsimp simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None, rule impI, + 
clarsimp simp: kh0H_dom objBitsKO_def s0_ptr_defs is_aligned_def page_offs_range_def + cnode_offs_range_def pt_offs_range_def irq_node_offs_range_def, + rule FalseE, drule int_not_emptyD, clarsimp, + (elim disjE, (clarsimp | drule(1) order_trans le_less_trans, fastforce)+)[1]) + apply (clarsimp simp: map_to_ctes_def Let_def kh0H_obj_def split del: if_split) + apply (subst if_split_eq1) + apply (rule conjI, clarsimp) + apply (fastforce dest: kh0H_dom_tcb simp: page_offs_range_def objBitsKO_def) + apply (clarsimp simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None) + apply (rule conjI; clarsimp) + apply (rule conjI; clarsimp) + apply (subst is_aligned_neg_mask_eq, clarsimp simp: objBitsKO_def page_offs_range_def is_aligned_weaken)+ + apply (clarsimp split: option.splits) + apply (intro conjI impI; drule kh0H_dom_sets_distinct[THEN orthD2] + kh0H_dom_sets_distinct[THEN orthD1], + drule not_in_range_cte_None, solves clarsimp) + apply (clarsimp simp: map_to_ctes_def Let_def kh0H_obj_def split del: if_split) + apply (frule irq_node_offs_range_correct) + apply (subst if_split_eq1) + apply (rule conjI) + apply (rule impI) + apply (clarsimp simp: option_update_range_def kh0H_dom_distinct not_in_range_cte_None) + apply fastforce + apply (rule impI) + apply clarsimp + apply (erule impE) + apply (rule is_aligned_add) + apply (simp add: is_aligned_def s0_ptr_defs objBitsKO_def) + apply (rule is_aligned_shiftl) + apply (clarsimp simp: objBitsKO_def) + apply (rule FalseE) + apply (clarsimp simp: s0_ptr_defs cnode_offs_range_def page_offs_range_def pt_offs_range_def + irq_node_offs_range_def objBitsKO_def kh0H_dom) + apply (cut_tac x=irq and 'a=64 in ucast_less) + apply simp + apply (drule shiftl_less_t2n'[where n=5]) + apply simp + apply simp + apply (drule plus_one_helper[where n="0x7FF", simplified]) + apply (elim disjE) + apply (unat_arith+)[7] + apply (drule int_not_emptyD) + apply clarsimp + apply (elim disjE, + ((clarsimp, + drule(1) aligned_le_sharp, + 
clarsimp simp: add.commute, + subst(asm) mask_out_add_aligned[symmetric], + simp add: is_aligned_shiftl, + simp add: mask_def, + drule word_leq_le_minus_one, + subst add.commute, + rule neq_0_no_wrap, + erule word_plus_mono_right2[rotated], + fastforce, + fastforce, + fastforce simp: add.commute) + | unat_arith)+)[1] + done + +lemma option_update_range_map_comp: + "option_update_range m m' = map_add m' m" + by (simp add: fun_eq_iff option_update_range_def map_comp_def map_add_def split: option.split) + +lemma tcb_offs_in_rangeI: + "\ ptr \ ptr + x; ptr + x \ ptr + 2 ^ 10 - 1 \ \ ptr + x \ tcb_offs_range ptr" + by (simp add: tcb_offs_range_def) + +lemma map_to_ctes_kh0H_simps[simp]: + "map_to_ctes kh0H (init_irq_node_ptr + (ucast (irq :: irq) << 5)) = Some (CTE NullCap Null_mdb)" + "map_to_ctes kh0H irq_cnode_ptr = Some (CTE NullCap Null_mdb)" + "length x = 10 \ map_to_ctes kh0H (Low_cnode_ptr + of_bl x * 0x20) = + Low_cte_cte Low_cnode_ptr (Low_cnode_ptr + of_bl x * 0x20)" + "length x = 10 \ map_to_ctes kh0H (High_cnode_ptr + of_bl x * 0x20) = + High_cte_cte High_cnode_ptr (High_cnode_ptr + of_bl x * 0x20)" + "length x = 10 \ map_to_ctes kh0H (Silc_cnode_ptr + of_bl x * 0x20) = + Silc_cte_cte Silc_cnode_ptr (Silc_cnode_ptr + of_bl x * 0x20)" + "map_to_ctes kh0H Low_tcb_ptr = Low_tcb_cte Low_tcb_ptr" + "map_to_ctes kh0H (Low_tcb_ptr + 0x20) = Low_tcb_cte (Low_tcb_ptr + 0x20)" + "map_to_ctes kh0H (Low_tcb_ptr + 0x40) = Low_tcb_cte (Low_tcb_ptr + 0x40)" + "map_to_ctes kh0H (Low_tcb_ptr + 0x60) = Low_tcb_cte (Low_tcb_ptr + 0x60)" + "map_to_ctes kh0H (Low_tcb_ptr + 0x80) = Low_tcb_cte (Low_tcb_ptr + 0x80)" + "map_to_ctes kh0H High_tcb_ptr = High_tcb_cte High_tcb_ptr" + "map_to_ctes kh0H (High_tcb_ptr + 0x20) = High_tcb_cte (High_tcb_ptr + 0x20)" + "map_to_ctes kh0H (High_tcb_ptr + 0x40) = High_tcb_cte (High_tcb_ptr + 0x40)" + "map_to_ctes kh0H (High_tcb_ptr + 0x60) = High_tcb_cte (High_tcb_ptr + 0x60)" + "map_to_ctes kh0H (High_tcb_ptr + 0x80) = High_tcb_cte 
(High_tcb_ptr + 0x80)" + "map_to_ctes kh0H idle_tcb_ptr = idle_tcb_cte idle_tcb_ptr" + "map_to_ctes kh0H (idle_tcb_ptr + 0x20) = idle_tcb_cte (idle_tcb_ptr + 0x20)" + "map_to_ctes kh0H (idle_tcb_ptr + 0x40) = idle_tcb_cte (idle_tcb_ptr + 0x40)" + "map_to_ctes kh0H (idle_tcb_ptr + 0x60) = idle_tcb_cte (idle_tcb_ptr + 0x60)" + "map_to_ctes kh0H (idle_tcb_ptr + 0x80) = idle_tcb_cte (idle_tcb_ptr + 0x80)" + supply option.case_cong[cong] if_cong[cong] + apply (clarsimp simp: map_to_ctes_kh0H option_update_range_def) + apply fastforce + apply (clarsimp simp: map_to_ctes_kh0H option_update_range_def kh0H_dom_distinct not_in_range_cte_None) + apply ((clarsimp simp: option_update_range_def not_in_range_cte_None cnode_offs_in_range + kh0H_dom_distinct kh0H_dom_distinct' map_to_ctes_kh0H s0_ptrs_aligned, + ((clarsimp simp: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None | + clarsimp simp: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1], + intro conjI, + (clarsimp, drule not_disjointI, + (erule offs_in_range | rule offs_in_range), + (erule offs_in_range | rule offs_in_range), + erule notE, rule kh0H_dom_sets_distinct)+, + clarsimp split: option.splits)+)[3] + apply (clarsimp simp: option_update_range_def not_in_range_cte_None + map_to_ctes_kh0H kh0H_dom_distinct split: option.splits) + apply (cut_tac ptr="Low_tcb_ptr" and x="0x20" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: not_in_range_cte_None option_update_range_def + map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) 
+ apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="Low_tcb_ptr" and x="0x40" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="Low_tcb_ptr" and x="0x60" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="Low_tcb_ptr" and x="0x80" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: 
map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: map_to_ctes_kh0H option_update_range_def kh0H_dom_distinct not_in_range_cte_None split: option.splits) + apply (cut_tac ptr="High_tcb_ptr" and x="0x20" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="High_tcb_ptr" and x="0x40" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] 
not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="High_tcb_ptr" and x="0x60" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="High_tcb_ptr" and x="0x80" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp 
simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: map_to_ctes_kh0H option_update_range_def kh0H_dom_distinct not_in_range_cte_None split: option.splits) + apply (cut_tac ptr="idle_tcb_ptr" and x="0x20" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="idle_tcb_ptr" and x="0x40" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="idle_tcb_ptr" and 
x="0x60" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (cut_tac ptr="idle_tcb_ptr" and x="0x80" in tcb_offs_in_rangeI, simp add: s0_ptr_defs, simp add: s0_ptr_defs) + apply (clarsimp simp: map_to_ctes_kh0H kh0H_dom_distinct kh0H_dom_distinct' + option_update_range_def not_in_range_cte_None + split: option.splits) + apply ((simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD1] not_in_range_cte_None + | simp add: offs_in_range kh0H_dom_sets_distinct[THEN orthD2] not_in_range_cte_None)+)[1] + apply (intro conjI impI allI) + apply (simp add: s0_ptr_defs) + apply clarsimp + apply (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + apply (clarsimp simp: kh0H_dom_distinct) + apply clarsimp + by (drule not_disjointI, + rule irq_node_offs_in_range, + assumption, + erule notE, + rule kh0H_dom_sets_distinct) + +lemma map_to_ctes_kh0H_dom: + "dom (map_to_ctes kh0H) = {idle_tcb_ptr, idle_tcb_ptr + 0x20, idle_tcb_ptr + 0x40, + idle_tcb_ptr + 0x60, idle_tcb_ptr + 0x80, + Low_tcb_ptr, Low_tcb_ptr + 0x20, Low_tcb_ptr + 0x40, + Low_tcb_ptr + 0x60, Low_tcb_ptr + 0x80, + High_tcb_ptr, High_tcb_ptr + 0x20, High_tcb_ptr + 0x40, + High_tcb_ptr + 0x60, High_tcb_ptr + 0x80, + irq_cnode_ptr} 
+ \ irq_node_offs_range + \ cnode_offs_range Silc_cnode_ptr + \ cnode_offs_range High_cnode_ptr + \ cnode_offs_range Low_cnode_ptr" + supply option.case_cong[cong] if_cong[cong] + apply (rule equalityI) + apply (simp add: map_to_ctes_kh0H dom_def) + apply clarsimp + apply (clarsimp simp: offs_in_range option_update_range_def split: option.splits if_split_asm) + apply (clarsimp simp: idle_tcb_cte_def) + apply (clarsimp simp: High_tcb_cte_def) + apply (clarsimp simp: Low_tcb_cte_def) + apply (clarsimp simp: Silc_cte_cte_def cnode_offs_range_def split: if_split_asm) + apply (clarsimp simp: High_cte_cte_def cnode_offs_range_def split: if_split_asm) + apply (clarsimp simp: Low_cte_cte_def cnode_offs_range_def split: if_split_asm) + apply (clarsimp simp: dom_def) + apply (clarsimp simp: idle_tcb_cte_def Low_tcb_cte_def High_tcb_cte_def) + apply (rule conjI) + apply (fastforce dest: irq_node_offs_range_correct) + apply (rule conjI) + apply clarsimp + apply (frule cnode_offs_range_correct) + apply (clarsimp simp: Silc_cte_cte_def Silc_cte'_def Silc_capsH_def empty_cte_def cnode_offs_range_def) + apply (rule conjI) + apply clarsimp + apply (frule cnode_offs_range_correct) + apply (clarsimp simp: High_cte_cte_def High_cte'_def High_capsH_def empty_cte_def cnode_offs_range_def) + apply clarsimp + apply (frule cnode_offs_range_correct) + apply (clarsimp simp: Low_cte_cte_def Low_cte'_def Low_capsH_def empty_cte_def cnode_offs_range_def) + done + +lemmas map_to_ctes_kh0H_SomeD' = + set_mp[OF equalityD1[OF map_to_ctes_kh0H_dom[simplified dom_def]], OF CollectI, simplified, OF exI] + +lemma map_to_ctes_kh0H_SomeD: + "map_to_ctes kh0H x = Some y + \ x = idle_tcb_ptr \ y = (CTE NullCap Null_mdb) \ + x = idle_tcb_ptr + 0x20 \ y = (CTE NullCap Null_mdb) \ + x = idle_tcb_ptr + 0x40 \ y = (CTE NullCap Null_mdb) \ + x = idle_tcb_ptr + 0x60 \ y = (CTE NullCap Null_mdb) \ + x = idle_tcb_ptr + 0x80 \ y = (CTE NullCap Null_mdb) \ + x = Low_tcb_ptr \ y = (CTE (CNodeCap Low_cnode_ptr 10 2 10) 
(MDB (Low_cnode_ptr + 0x40) 0 False False)) \ + x = Low_tcb_ptr + 0x20 \ y = (CTE (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid,0)))) + (MDB (Low_cnode_ptr + 0x60) 0 False False)) \ + x = Low_tcb_ptr + 0x40 \ y = (CTE (ReplyCap Low_tcb_ptr True True) (MDB 0 0 True True)) \ + x = Low_tcb_ptr + 0x60 \ y = (CTE NullCap Null_mdb) \ + x = Low_tcb_ptr + 0x80 \ y = (CTE NullCap Null_mdb) \ + x = High_tcb_ptr \ y = (CTE (CNodeCap High_cnode_ptr 10 2 10) (MDB (High_cnode_ptr + 0x40) 0 False False)) \ + x = High_tcb_ptr + 0x20 \ y = (CTE (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0)))) + (MDB (High_cnode_ptr + 0x60) 0 False False)) \ + x = High_tcb_ptr + 0x40 \ y = (CTE (ReplyCap High_tcb_ptr True True) (MDB 0 0 True True)) \ + x = High_tcb_ptr + 0x60 \ y = (CTE NullCap Null_mdb) \ + x = High_tcb_ptr + 0x80 \ y = (CTE NullCap Null_mdb) \ + x = irq_cnode_ptr \ y = (CTE NullCap Null_mdb) \ + x \ irq_node_offs_range \ y = (CTE NullCap Null_mdb) \ + x \ cnode_offs_range Silc_cnode_ptr \ Silc_cte_cte Silc_cnode_ptr x \ None + \ y = the (Silc_cte_cte Silc_cnode_ptr x) \ + x \ cnode_offs_range High_cnode_ptr \ High_cte_cte High_cnode_ptr x \ None \ y = the (High_cte_cte High_cnode_ptr x) \ + x \ cnode_offs_range Low_cnode_ptr \ Low_cte_cte Low_cnode_ptr x \ None \ y = the (Low_cte_cte Low_cnode_ptr x)" + apply (frule map_to_ctes_kh0H_SomeD') + apply (erule disjE, rule disjI1, clarsimp simp: idle_tcb_cte_def idle_tcbH_def + Low_tcb_cte_def Low_tcbH_def Low_capsH_def + High_tcb_cte_def High_tcbH_def High_capsH_def + the_nat_to_bl_def nat_to_bl_def, rule disjI2)+ + apply ((erule disjE)?, drule offs_range_correct, clarsimp simp: offs_in_range)+ + done + +lemma mask_neg_add_aligned: + "is_aligned q n \ p + q && ~~ mask n = (p && ~~ mask n) + q" + apply (subst add.commute) + apply (simp add: mask_out_add_aligned[symmetric]) + done + +lemma mask_neg_add_aligned': + "is_aligned q n \ q + p && ~~ mask n = (p && ~~ mask n) + q" + by (simp add: 
mask_out_add_aligned[symmetric]) + +lemma kh_s0H[simp]: + "ksPSpace s0H_internal = kh0H" + by (simp add: s0H_internal_def) + +lemma pspace_distinct'_split: + notes less_1_simp[simp del] shows + "(\(y, ko) \ graph_of (ksPSpace ks). (x \ y \ y + (1 << objBitsKO ko) - 1 < x) + \ y \ y + (1 << objBitsKO ko) - 1) + \ pspace_distinct' (ks \ksPSpace := restrict_map (ksPSpace ks) {..< x}\) + \ pspace_distinct' (ks \ksPSpace := restrict_map (ksPSpace ks) {x ..}\) + \ pspace_distinct' ks" + apply (clarsimp simp: pspace_distinct'_def) + apply (drule bspec, erule graph_ofI, clarsimp) + apply (simp add: Ball_def) + apply (drule_tac x=xa in spec)+ + apply (erule disjE) + apply (simp add: domI) + apply (thin_tac "P \ Q" for P Q) + apply (simp add: ps_clear_def) + apply (erule trans[rotated]) + apply auto[1] + apply (clarsimp simp add: domI) + apply (drule mp, erule(1) order_le_less_trans) + apply (thin_tac "P \ Q" for P Q) + apply (simp add: ps_clear_def) + apply (erule trans[rotated]) + apply (fastforce simp: mask_eq_exp_minus_1 add_diff_eq) + done + +lemma irq_node_offs_range_def2: + "irq_node_offs_range = {x. init_irq_node_ptr \ x \ x \ init_irq_node_ptr + 0x7E0} \ + {x. 
is_aligned x 5}" + apply (safe, simp_all add: irq_node_offs_range_def add.commute) + by (auto dest: word_less_sub_1 simp: s0_ptr_defs elim: dual_order.strict_trans2[rotated]) + +(* FIXME IF: fix repetitiveness *) +lemma s0H_pspace_distinct': + notes pteBits_def[simp] objBits_defs[simp] + shows "pspace_distinct' s0H_internal" + supply option.case_cong[cong] if_cong[cong] + apply (clarsimp simp: pspace_distinct'_def ps_clear_def mask_eq_exp_minus_1) + apply (rule disjointI) + apply clarsimp + apply (drule kh0H_SomeD)+ + \ \ntfn_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal by ((elim disjE; clarsimp), + (thin_tac "_ \ _", clarsimp simp: pt_offs_range_def page_offs_range_def + cnode_offs_range_def irq_node_offs_range_def2 + , drule dual_order.trans, assumption + , clarsimp simp: s0_ptr_defs objBitsKO_def + | solves \clarsimp simp: s0_ptr_defs objBitsKO_def\)+) + \ \Low_tcb_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal by ((elim disjE; clarsimp), + (thin_tac "_ \ _", clarsimp simp: pt_offs_range_def page_offs_range_def + cnode_offs_range_def irq_node_offs_range_def2 + , drule dual_order.trans, assumption + , clarsimp simp: s0_ptr_defs objBitsKO_def + | solves \clarsimp simp: s0_ptr_defs objBitsKO_def\)+) + \ \High_tcb_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal by ((elim disjE; clarsimp), + (thin_tac "_ \ _", clarsimp simp: pt_offs_range_def page_offs_range_def + cnode_offs_range_def irq_node_offs_range_def2 + , drule dual_order.trans, assumption + , clarsimp simp: s0_ptr_defs objBitsKO_def + | solves \clarsimp simp: s0_ptr_defs objBitsKO_def\)+) + \ \Idle_tcb_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal by ((elim disjE; clarsimp), + (thin_tac "_ \ _", clarsimp simp: pt_offs_range_def page_offs_range_def + cnode_offs_range_def irq_node_offs_range_def2 + , drule dual_order.trans, assumption + , clarsimp simp: s0_ptr_defs objBitsKO_def + | solves \clarsimp simp: s0_ptr_defs objBitsKO_def\)+) + \ \riscv_global_pt_ptr\ + 
apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[4] + apply (clarsimp simp: pt_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def bit_simps + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def2 cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \irq_node_offs_range\ + apply (erule_tac P="_ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[5] + apply (clarsimp simp: irq_node_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def + split: if_split_asm; + (drule(1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def2 cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp 
add: s0_ptr_defs mask_def)+ + \ \Low_pt_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[6] + apply (clarsimp simp: pt_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def bit_simps + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def2 cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \High_pt_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[7] + apply (clarsimp simp: pt_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def bit_simps + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def2 cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + 
_" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \Low_pd_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[8] + apply (clarsimp simp: pt_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def bit_simps + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def2 cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \High_pd_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[9] + apply (clarsimp simp: pt_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def bit_simps + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, 
+ (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \Low_pool_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal for x y ya yb + by ((elim disjE; clarsimp), + ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def cnode_offs_range_def + page_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def, + (thin_tac "ya \ _", drule dual_order.trans, assumption | + thin_tac "_ \ ya", drule dual_order.trans, assumption)?, + solves \clarsimp simp: s0_ptr_defs bit_simps\)+)) + \ \High_pool_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal for x y ya yb + by ((elim disjE; clarsimp), + ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def cnode_offs_range_def + page_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def, + (thin_tac "ya \ _", drule dual_order.trans, assumption | + thin_tac "_ \ ya", drule dual_order.trans, assumption)?, + solves \clarsimp simp: s0_ptr_defs bit_simps\)+)) + \ \Low_cnode_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: cnode_offs_range_def irq_node_offs_range_def2 + pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[12] + apply (clarsimp simp: objBitsKO_def kh0H_obj_def Low_cte'_def Low_capsH_def cnode_offs_range_def + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, 
assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \High_cnode_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: cnode_offs_range_def irq_node_offs_range_def2 + pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[13] + apply (clarsimp simp: objBitsKO_def kh0H_obj_def High_cte'_def High_capsH_def cnode_offs_range_def + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \Silc_cnode_ptr\ + apply (erule_tac P="_ \ _ \ y = _" in disjE) + apply (elim disjE; clarsimp) + apply ((clarsimp simp: cnode_offs_range_def irq_node_offs_range_def2 + pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[14] + apply (clarsimp simp: objBitsKO_def kh0H_obj_def Silc_cte'_def Silc_capsH_def cnode_offs_range_def + split: if_split_asm; + (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def)) + apply (thin_tac "_ \ _", + clarsimp simp: kh0H_obj_def objBitsKO_def archObjSize_def bit_simps pt_offs_range_def + irq_node_offs_range_def cnode_offs_range_def page_offs_range_def, + drule_tac a=x and b="_ + _" in aligned_le_sharp, assumption, + drule dual_order.trans[rotated], + erule 
word_plus_mono_left, simp add: s0_ptr_defs mask_def, + (drule_tac b=ya and a="(_ && ~~ mask _) + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs mask_def)+ + \ \irq_cnode_ptr\ + apply (erule_tac P="_ \ y = _" in disjE) + subgoal for x y ya yb + by ((elim disjE; clarsimp), + ((clarsimp simp: irq_node_offs_range_def2 pt_offs_range_def cnode_offs_range_def + page_offs_range_def objBitsKO_def archObjSize_def kh0H_obj_def, + (thin_tac "ya \ _", drule dual_order.trans, assumption | + thin_tac "_ \ ya", drule dual_order.trans, assumption)?, + solves \clarsimp simp: s0_ptr_defs bit_simps\)+)) + \ \shared_page_ptr\ + apply (elim disjE; clarsimp) + apply ((clarsimp simp: cnode_offs_range_def irq_node_offs_range_def2 + page_offs_range_def pt_offs_range_def objBitsKO_def, + drule dual_order.trans, assumption, + (thin_tac "ya \ _", thin_tac "_ \ ya", + drule_tac b=ya and a="_ + _" in dual_order.trans, assumption)?, + simp add: s0_ptr_defs)+)[16] + apply (clarsimp simp: page_offs_range_def objBitsKO_def archObjSize_def bit_simps) + apply (drule (1) aligned_le_sharp, simp add: mask_neg_add_aligned', fastforce simp: mask_def) + done + +lemma pspace_distinctD'': + "\ \v. 
ksPSpace s x = Some v \ objBitsKO v = n; pspace_distinct' s \ + \ ps_clear x n s" + apply clarsimp + apply (drule(1) pspace_distinctD') + apply simp + done + +lemma cnode_offs_min2': + "is_aligned ptr 15 \ (ptr :: obj_ref) \ ptr + 0x20 * (x && mask 10)" + apply (erule is_aligned_no_wrap') + apply (subst mult.commute) + apply (rule div_lt_mult) + apply (cut_tac and_mask_less'[where n=10]) + apply simp + apply simp + apply simp + done + +lemma cnode_offs_min2: + "Low_cnode_ptr \ Low_cnode_ptr + 0x20 * (x && mask 10)" + "High_cnode_ptr \ High_cnode_ptr + 0x20 * (x && mask 10)" + "Silc_cnode_ptr \ Silc_cnode_ptr + 0x20 * (x && mask 10)" + by (simp_all add: cnode_offs_min2' s0_ptrs_aligned) + +lemma cnode_offs_max2': + "is_aligned ptr 15 \ (ptr::obj_ref) + 0x20 * (x && mask 10) \ ptr + 0x7fff" + apply (rule word_plus_mono_right) + apply (subst mult.commute) + apply (rule div_to_mult_word_lt) + apply simp + apply (rule plus_one_helper) + apply simp + apply (cut_tac and_mask_less'[where n=10]) + apply simp + apply simp + apply (drule is_aligned_no_overflow) + apply (simp add: add.commute) + done + +lemma cnode_offs_max2: + "Low_cnode_ptr + 0x20 * (x && mask 10) \ Low_cnode_ptr + 0x7fff" + "High_cnode_ptr + 0x20 * (x && mask 10) \ High_cnode_ptr + 0x7fff" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ Silc_cnode_ptr + 0x7fff" + by (simp_all add: cnode_offs_max2' s0_ptrs_aligned) + +lemma cnode_offs_in_range2': + "is_aligned ptr 15 \ ptr + 0x20 * (x && mask 10) \ cnode_offs_range ptr" + apply (clarsimp simp: cnode_offs_min2' cnode_offs_max2' cnode_offs_range_def add.commute) + apply (rule is_aligned_add) + apply (erule is_aligned_weaken) + apply simp + apply (rule_tac is_aligned_mult_triv1[where n=5, simplified]) + done + +lemma cnode_offs_in_range2: + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ cnode_offs_range Silc_cnode_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ cnode_offs_range Low_cnode_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ cnode_offs_range High_cnode_ptr" + 
by (simp_all add: cnode_offs_in_range2' s0_ptrs_aligned)+ + +lemma kh0H_dom_distinct2: + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ idle_tcb_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ High_tcb_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ Low_tcb_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ High_pool_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ Low_pool_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ irq_cnode_ptr" + "Silc_cnode_ptr + 0x20 * (x && mask 10) \ ntfn_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ idle_tcb_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ High_tcb_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ Low_tcb_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ High_pool_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ Low_pool_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ irq_cnode_ptr" + "Low_cnode_ptr + 0x20 * (x && mask 10) \ ntfn_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ idle_tcb_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ High_tcb_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ Low_tcb_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ High_pool_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ Low_pool_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ irq_cnode_ptr" + "High_cnode_ptr + 0x20 * (x && mask 10) \ ntfn_ptr" + by (cut_tac x=x in cnode_offs_in_range2(1), fastforce simp: kh0H_dom_distinct + | cut_tac x=x in cnode_offs_in_range2(2), fastforce simp: kh0H_dom_distinct + | cut_tac x=x in cnode_offs_in_range2(3), fastforce simp: kh0H_dom_distinct)+ + +lemma kh0H_cnode_simps2[simp]: + "kh0H (Low_cnode_ptr + 0x20 * (x && mask 10)) = Low_cte Low_cnode_ptr (Low_cnode_ptr + 0x20 * (x && mask 10))" + "kh0H (High_cnode_ptr + 0x20 * (x && mask 10)) = High_cte High_cnode_ptr (High_cnode_ptr + 0x20 * (x && mask 10))" + "kh0H (Silc_cnode_ptr + 0x20 * (x && mask 10)) = Silc_cte Silc_cnode_ptr (Silc_cnode_ptr + 0x20 * (x && mask 10))" + supply option.case_cong[cong] if_cong[cong] + by (clarsimp simp: kh0H_def 
option_update_range_def cnode_offs_in_range' s0_ptrs_aligned + kh0H_dom_distinct kh0H_dom_distinct2 not_in_range_None, + ((clarsimp simp: cnode_offs_in_range2 kh0H_dom_sets_distinct[THEN orthD1] not_in_range_None + | clarsimp simp: cnode_offs_in_range2 kh0H_dom_sets_distinct[THEN orthD2] not_in_range_None)+), + intro conjI, + (clarsimp, drule not_disjointI, + (rule irq_node_offs_in_range cnode_offs_in_range2 | erule offs_in_range), + (rule irq_node_offs_in_range cnode_offs_in_range2 | erule offs_in_range), + erule notE, rule kh0H_dom_sets_distinct)+, + clarsimp split: option.splits)+ + +lemma cnode_offs_aligned2: + "is_aligned (Low_cnode_ptr + 0x20 * (addr && mask 10)) 5" + "is_aligned (High_cnode_ptr + 0x20 * (addr && mask 10)) 5" + "is_aligned (Silc_cnode_ptr + 0x20 * (addr && mask 10)) 5" + by (rule is_aligned_add, rule is_aligned_weaken, rule s0_ptrs_aligned, + simp, rule is_aligned_mult_triv1[where n=5, simplified])+ + +lemma less_t2n_ex_ucast: + "\ (x::'a::len word) < 2 ^ n; len_of TYPE('b) = n \ \ \y. x = ucast (y::'b::len word)" + apply (rule_tac x="ucast x" in exI) + apply (rule ucast_ucast_len[symmetric]) + apply simp + done + +lemma pd_offs_aligned: + "is_aligned (Low_pd_ptr + (ucast (x :: pt_index) << 3)) 3" + "is_aligned (High_pd_ptr + (ucast (x :: pt_index) << 3)) 3" + by (rule is_aligned_add[OF _ is_aligned_shift], simp add: s0_ptr_defs is_aligned_def)+ + +lemma less_0x200_exists_ucast: + "p < 0x200 \ \p'. 
p = UCAST(9 \ 64) p'" + apply (rule_tac x="UCAST(64 \ 9) p" in exI) + apply word_bitwise + apply clarsimp + done + +lemma valid_caps_s0H[simp]: + notes pteBits_def[simp] objBits_defs[simp] + shows + "valid_cap' NullCap s0H_internal" + "valid_cap' (ThreadCap Low_tcb_ptr) s0H_internal" + "valid_cap' (ThreadCap High_tcb_ptr) s0H_internal" + "valid_cap' (CNodeCap Low_cnode_ptr 10 2 10) s0H_internal" + "valid_cap' (CNodeCap High_cnode_ptr 10 2 10) s0H_internal" + "valid_cap' (CNodeCap Silc_cnode_ptr 10 2 10) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage False (Some (ucast Low_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast High_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage False (Some (ucast Silc_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (PageTableCap Low_pt_ptr (Some (ucast Low_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (PageTableCap High_pt_ptr (Some (ucast High_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0)))) s0H_internal" + "valid_cap' (ArchObjectCap (ASIDPoolCap Low_pool_ptr (ucast Low_asid))) s0H_internal" + "valid_cap' (ArchObjectCap (ASIDPoolCap High_pool_ptr (ucast High_asid))) s0H_internal" + "valid_cap' (NotificationCap ntfn_ptr 0 True False) s0H_internal" + "valid_cap' (NotificationCap ntfn_ptr 0 False True) s0H_internal" + "valid_cap' (ReplyCap Low_tcb_ptr True True) s0H_internal" + "valid_cap' (ReplyCap High_tcb_ptr True True) s0H_internal" + supply option.case_cong[cong] if_cong[cong] + apply (simp | simp add: valid_cap'_def s0H_internal_def capAligned_def word_bits_def + objBits_def s0_ptrs_aligned obj_at'_def, + intro conjI, simp add: objBitsKO_def s0_ptrs_aligned, simp 
add: objBitsKO_def, + simp add: objBitsKO_def s0_ptrs_aligned mask_def, + rule pspace_distinctD'[OF _ s0H_pspace_distinct', simplified s0H_internal_def], + simp)+ + apply (simp add: valid_cap'_def capAligned_def word_bits_def objBits_def s0_ptrs_aligned obj_at'_def) + apply (intro conjI) + apply (simp add: objBitsKO_def s0_ptrs_aligned) + apply (simp add: objBitsKO_def) + apply (simp add: objBitsKO_def s0_ptrs_aligned mask_def) + apply (clarsimp simp: Low_cte_def Low_cte'_def Low_capsH_def cnode_offs_min2 objBitsKO_def + cnode_offs_max2 cnode_offs_aligned2 add.commute s0_ptrs_aligned empty_cte_def) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct']) + apply (simp add: Low_cte_def Low_cte'_def Low_capsH_def empty_cte_def objBitsKO_def + cnode_offs_min2 cnode_offs_max2 cnode_offs_aligned2 add.commute s0_ptrs_aligned) + apply (simp add: valid_cap'_def capAligned_def word_bits_def objBits_def s0_ptrs_aligned obj_at'_def) + apply (intro conjI) + apply (simp add: objBitsKO_def s0_ptrs_aligned) + apply (simp add: objBitsKO_def) + apply (simp add: objBitsKO_def s0_ptrs_aligned mask_def) + apply (clarsimp simp: High_cte_def High_cte'_def High_capsH_def cnode_offs_min2 cnode_offs_max2 + cnode_offs_aligned2 add.commute s0_ptrs_aligned objBitsKO_def empty_cte_def) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct']) + apply (simp add: High_cte_def High_cte'_def High_capsH_def empty_cte_def objBitsKO_def + cnode_offs_min2 cnode_offs_max2 cnode_offs_aligned2 add.commute s0_ptrs_aligned) + apply (simp add: valid_cap'_def capAligned_def word_bits_def objBits_def s0_ptrs_aligned obj_at'_def) + apply (intro conjI) + apply (simp add: objBitsKO_def s0_ptrs_aligned) + apply (simp add: objBitsKO_def) + apply (simp add: objBitsKO_def s0_ptrs_aligned mask_def) + apply (clarsimp simp: Silc_cte_def Silc_cte'_def Silc_capsH_def cnode_offs_min2 objBitsKO_def + empty_cte_def cnode_offs_max2 cnode_offs_aligned2 add.commute s0_ptrs_aligned) + apply (rule pspace_distinctD''[OF _ 
s0H_pspace_distinct']) + apply (simp add: Silc_cte_def Silc_cte'_def Silc_capsH_def empty_cte_def objBitsKO_def + cnode_offs_min2 cnode_offs_max2 cnode_offs_aligned2 add.commute s0_ptrs_aligned) + apply ((clarsimp simp: valid_cap'_def capAligned_def word_bits_def s0_ptrs_aligned bit_simps + Low_asid_def High_asid_def Silc_asid_def asid_bits_defs + vmsz_aligned_def frame_at'_def typ_at'_def ko_wp_at'_def + wellformed_mapdata'_def asid_wf_def mask_def, + drule less_0x200_exists_ucast, clarsimp, clarsimp simp: objBitsKO_def, + rule conjI, clarsimp simp: s0_ptr_defs is_aligned_mask bit_simps mask_def, word_bitwise, + rule pspace_distinctD''[OF _ s0H_pspace_distinct'], + simp add: objBitsKO_def)+)[3] + apply ((simp add: valid_cap'_def capAligned_def word_bits_def Low_asid_def High_asid_def + asid_bits_defs asid_wf_def s0_ptrs_aligned wellformed_mapdata'_def + bit_simps mask_def archObjSize_def pt_offs_min objBitsKO_def + page_table_at'_def typ_at'_def ko_wp_at'_def kh0H_obj_def, safe, + rule pspace_distinctD''[OF _ s0H_pspace_distinct'], + simp add: pt_offs_min kh0H_obj_def archObjSize_def bit_simps objBitsKO_def, + clarsimp simp: is_aligned_mask mask_def s0_ptr_defs, word_bitwise, + fastforce simp: pt_offs_max add.commute)+)[4] + apply ((simp add: valid_cap'_def capAligned_def word_bits_def Low_asid_def High_asid_def + asid_bits_defs asid_wf_def s0_ptrs_aligned bit_simps wellformed_mapdata'_def + mask_def page_table_at'_def typ_at'_def ko_wp_at'_def kh0H_obj_def + objBitsKO_def archObjSize_def pt_offs_min, + intro conjI, clarsimp simp: is_aligned_mask mask_def s0_ptr_defs, + rule pspace_distinctD''[OF _ s0H_pspace_distinct'], + simp add: kh0H_obj_def archObjSize_def bit_simps objBitsKO_def)+)[2] + by (simp add: valid_cap'_def s0H_internal_def capAligned_def word_bits_def objBits_def obj_at'_def, + intro conjI, simp add: objBitsKO_def s0_ptrs_aligned, simp add: objBitsKO_def, + simp add: objBitsKO_def s0_ptrs_aligned mask_def, + rule pspace_distinctD'[OF _ 
s0H_pspace_distinct', simplified s0H_internal_def], simp)+ + +text \We can only instantiate our example state (featuring high and low domains) if the number + of configured domains is > 1, i.e. that maxDomain is 1 or greater. When seL4 is configured for a + single domain only, none of the state instantiation proofs below are relevant.\ + +lemma s0H_valid_objs': + "1 \ maxDomain \ valid_objs' s0H_internal" + supply objBits_defs[simp] + apply (clarsimp simp: valid_objs'_def ran_def) + apply (drule kh0H_SomeD) + apply (elim disjE) + apply (clarsimp simp: valid_obj'_def ntfnH_def valid_ntfn'_def obj_at'_def) + apply (rule conjI) + apply (clarsimp simp: is_aligned_def s0_ptr_defs objBitsKO_def) + apply (clarsimp simp: pspace_distinctD'[OF _ s0H_pspace_distinct']) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def valid_tcb_state'_def + Low_domain_def minBound_word valid_arch_tcb'_def + Low_mcp_def Low_prio_def maxPriority_def numPriorities_def + tcb_cte_cases_def Low_capsH_def kh0H_obj_def) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def valid_tcb_state'_def + High_domain_def minBound_word valid_arch_tcb'_def + High_mcp_def High_prio_def maxPriority_def numPriorities_def + tcb_cte_cases_def High_capsH_def obj_at'_def kh0H_obj_def) + apply (rule conjI) + apply (simp add: is_aligned_def s0_ptr_defs objBitsKO_def) + apply (clarsimp simp: ntfnH_def pspace_distinctD'[OF _ s0H_pspace_distinct']) + apply (clarsimp simp: valid_obj'_def valid_tcb'_def kh0H_obj_def valid_tcb_state'_def + default_domain_def minBound_word valid_arch_tcb'_def + default_priority_def tcb_cte_cases_def) + defer 2 + apply (auto simp: is_aligned_def addrFromPPtr_def ptrFromPAddr_def pptrBaseOffset_def + valid_obj'_def s0_ptr_defs kh0H_obj_def valid_arch_obj'_def)[5] + apply (auto simp: valid_obj'_def valid_cte'_def empty_cte_def irq_cte_def + valid_arch_obj'_def + Low_cte_def Low_cte'_def Low_capsH_def + High_cte_def High_cte'_def High_capsH_def + Silc_cte_def Silc_cte'_def Silc_capsH_def) + 
done + +lemmas the_nat_to_bl_simps = the_nat_to_bl_def nat_to_bl_def + +lemma ucast_shiftr_13E: + "\ ucast (p - ptr >> 5) = (0x13E :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5 \ + \ p = (ptr :: obj_ref) + 0x27C0" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0x27C0" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_6: + "\ ucast (p - ptr >> 5) = (0x6 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0xC0" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0xC0" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_5: + "\ ucast (p - ptr >> 5) = (5 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0xA0" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + 
apply simp + apply simp + apply (drule shiftr_eqD[where y="0xA0" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_4: + "\ ucast (p - ptr >> 5) = (4 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0x80" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0x80" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_3: + "\ucast (p - ptr >> 5) = (3 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0x60" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0x60" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_2: + "\ucast (p - ptr >> 5) = (2 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0x40" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + 
apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0x40" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemma ucast_shiftr_1: + "\ucast (p - ptr >> 5) = (1 :: 10 word); p \ 0x7FFF + ptr; ptr \ p; + is_aligned ptr 15; is_aligned p 5\ + \ p = (ptr :: obj_ref) + 0x20" + apply (subst(asm) up_ucast_inj_eq[symmetric, where 'b=64]) + apply simp + apply simp + apply (subst(asm) ucast_ucast_len) + apply simp + apply (rule shiftr_less_t2n[where m=10, simplified]) + apply simp + apply (rule word_leq_minus_one_le) + apply simp + apply simp + apply (rule word_diff_ls') + apply simp + apply simp + apply (drule shiftr_eqD[where y="0x20" and n=5 and 'a=64, simplified]) + apply (erule(1) aligned_sub_aligned[OF _ is_aligned_weaken]) + apply simp + apply simp + apply (simp add: is_aligned_def) + apply (simp add: diff_eq_eq) + done + +lemmas kh0H_all_obj_def' = Low_cte_cte_def High_cte_cte_def Silc_cte_cte_def + Low_tcb_cte_def High_tcb_cte_def idle_tcb_cte_def kh0H_all_obj_def + +lemma map_to_ctes_kh0H_simps'[simp]: + "map_to_ctes kh0H (Low_cnode_ptr + 0x20) = Some (CTE (ThreadCap Low_tcb_ptr) Null_mdb)" + "map_to_ctes kh0H (Low_cnode_ptr + 0x40) = Some (CTE (CNodeCap Low_cnode_ptr 10 2 10) + (MDB 0 Low_tcb_ptr False False))" + "map_to_ctes kh0H (Low_cnode_ptr + 0x60) = Some (CTE (ArchObjectCap (PageTableCap Low_pd_ptr (Some (ucast Low_asid, 0)))) + (MDB 0 (Low_tcb_ptr + 0x20) False False))" + "map_to_ctes kh0H (Low_cnode_ptr + 0x80) = Some (CTE (ArchObjectCap (ASIDPoolCap Low_pool_ptr (ucast Low_asid))) Null_mdb)" + "map_to_ctes kh0H (Low_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadWrite RISCVLargePage + False (Some 
(ucast Low_asid, 0)))) + (MDB 0 (Silc_cnode_ptr + 0xA0) False False))" + "map_to_ctes kh0H (Low_cnode_ptr + 0xC0) = Some (CTE (ArchObjectCap (PageTableCap Low_pt_ptr (Some (ucast Low_asid, 0)))) Null_mdb)" + "map_to_ctes kh0H (Low_cnode_ptr + 0x27C0) = Some (CTE (NotificationCap ntfn_ptr 0 True False) + (MDB (Silc_cnode_ptr + 0x27C0) 0 False False))" + "map_to_ctes kh0H (High_cnode_ptr + 0x20) = Some (CTE (ThreadCap High_tcb_ptr) Null_mdb)" + "map_to_ctes kh0H (High_cnode_ptr + 0x40) = Some (CTE (CNodeCap High_cnode_ptr 10 2 10) (MDB 0 High_tcb_ptr False False))" + "map_to_ctes kh0H (High_cnode_ptr + 0x60) = Some (CTE (ArchObjectCap (PageTableCap High_pd_ptr (Some (ucast High_asid, 0)))) + (MDB 0 (High_tcb_ptr + 0x20) False False))" + "map_to_ctes kh0H (High_cnode_ptr + 0x80) = Some (CTE (ArchObjectCap (ASIDPoolCap High_pool_ptr (ucast High_asid))) Null_mdb)" + "map_to_ctes kh0H (High_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage + False (Some (ucast High_asid, 0)))) + (MDB (Silc_cnode_ptr + 0xA0) 0 False False))" + "map_to_ctes kh0H (High_cnode_ptr + 0xC0) = Some (CTE (ArchObjectCap (PageTableCap High_pt_ptr (Some (ucast High_asid, 0)))) Null_mdb)" + "map_to_ctes kh0H (High_cnode_ptr + 0x27C0) = Some (CTE (NotificationCap ntfn_ptr 0 False True) + (MDB 0 (Silc_cnode_ptr + 0x27C0) False False))" + "map_to_ctes kh0H (Silc_cnode_ptr + 0x40) = Some (CTE (CNodeCap Silc_cnode_ptr 10 2 10) Null_mdb)" + "map_to_ctes kh0H (Silc_cnode_ptr + 0xA0) = Some (CTE (ArchObjectCap (FrameCap shared_page_ptr_virt VMReadOnly RISCVLargePage + False (Some (ucast Silc_asid, 0)))) + (MDB (Low_cnode_ptr + 0xA0) (High_cnode_ptr + 0xA0) False False))" + "map_to_ctes kh0H (Silc_cnode_ptr + 0x27C0) = Some (CTE (NotificationCap ntfn_ptr 0 True False) + (MDB (High_cnode_ptr + 318 * 0x20) (Low_cnode_ptr + 318 * 0x20) False False))" + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 1", simplified the_nat_to_bl_simps, 
simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 2", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 3", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 4", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 5", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 6", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(3)[where x="the_nat_to_bl_10 318", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 1", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 2", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 3", 
simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 4", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 5", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 6", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(4)[where x="the_nat_to_bl_10 318", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(5)[where x="the_nat_to_bl_10 2", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(5)[where x="the_nat_to_bl_10 5", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + apply (clarsimp simp: map_to_ctes_kh0H_simps(5)[where x="the_nat_to_bl_10 318", simplified the_nat_to_bl_simps, simplified] + kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps, fastforce simp: s0_ptr_defs is_aligned_def) + done + +lemma mdb_next_s0H: + "p' \ 0 + \ map_to_ctes kh0H \ p \ p' = + (p = Low_cnode_ptr + 0x27C0 \ p' = Silc_cnode_ptr + 0x27C0 \ + p = Silc_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0 \ + p = High_cnode_ptr + 0xA0 \ p' = Silc_cnode_ptr + 0xA0 \ + p = 
Silc_cnode_ptr + 0xA0 \ p' = Low_cnode_ptr + 0xA0 \ + p = Low_tcb_ptr \ p' = Low_cnode_ptr + 0x40 \ + p = Low_tcb_ptr + 0x20 \ p' = Low_cnode_ptr + 0x60 \ + p = High_tcb_ptr \ p' = High_cnode_ptr + 0x40 \ + p = High_tcb_ptr + 0x20 \ p' = High_cnode_ptr + 0x60)" + apply (rule iffI) + apply (simp add: next_unfold') + apply (elim exE conjE) + apply (frule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_13E ucast_shiftr_5 s0_ptrs_aligned + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_5 s0_ptrs_aligned + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_13E ucast_shiftr_5 s0_ptrs_aligned + split: if_split_asm) + apply (clarsimp simp: next_unfold' map_to_ctes_kh0H_dom) + apply (elim disjE, simp_all add: kh0H_all_obj_def') + done + +lemma mdb_prev_s0H: + "p \ 0 + \ map_to_ctes kh0H \ p \ p' = + (p = Low_cnode_ptr + 0x27C0 \ p' = Silc_cnode_ptr + 0x27C0 \ + p = Silc_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0 \ + p = High_cnode_ptr + 0xA0 \ p' = Silc_cnode_ptr + 0xA0 \ + p = Silc_cnode_ptr + 0xA0 \ p' = Low_cnode_ptr + 0xA0 \ + p = Low_tcb_ptr \ p' = Low_cnode_ptr + 0x40 \ + p = Low_tcb_ptr + 0x20 \ p' = Low_cnode_ptr + 0x60 \ + p = High_tcb_ptr \ p' = High_cnode_ptr + 0x40 \ + p = High_tcb_ptr + 0x20 \ p' = High_cnode_ptr + 0x60)" + apply (rule iffI) + apply (simp add: mdb_prev_def) + apply (elim exE conjE) + apply (frule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all)[1] + apply clarsimp + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_13E ucast_shiftr_5 s0_ptrs_aligned + split: if_split_asm) + apply clarsimp + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_13E ucast_shiftr_3 ucast_shiftr_2 s0_ptrs_aligned + split: if_split_asm) + apply 
clarsimp + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + ucast_shiftr_5 ucast_shiftr_3 ucast_shiftr_2 s0_ptrs_aligned + split: if_split_asm) + apply (clarsimp simp: mdb_prev_def map_to_ctes_kh0H_dom) + apply (elim disjE, simp_all add: kh0H_all_obj_def') + done + +lemma mdb_next_trancl_s0H: + "p' \ 0 + \ map_to_ctes kh0H \ p \\<^sup>+ p' = + (p = Low_cnode_ptr + 0x27C0 \ p' = Silc_cnode_ptr + 0x27C0 \ + p = Silc_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0 \ + p = Low_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0 \ + p = High_cnode_ptr + 0xA0 \ p' = Silc_cnode_ptr + 0xA0 \ + p = Silc_cnode_ptr + 0xA0 \ p' = Low_cnode_ptr + 0xA0 \ + p = High_cnode_ptr + 0xA0 \ p' = Low_cnode_ptr + 0xA0 \ + p = Low_tcb_ptr \ p' = Low_cnode_ptr + 0x40 \ + p = Low_tcb_ptr + 0x20 \ p' = Low_cnode_ptr + 0x60 \ + p = High_tcb_ptr \ p' = High_cnode_ptr + 0x40 \ + p = High_tcb_ptr + 0x20 \ p' = High_cnode_ptr + 0x60)" + apply (rule iffI) + apply (erule converse_trancl_induct) + apply (clarsimp simp: mdb_next_s0H) + apply (subst (asm) mdb_next_s0H) + apply (clarsimp simp: s0_ptr_defs) + apply (clarsimp simp: s0_ptr_defs del: disjCI) + apply ((erule_tac P="y = _ \ _" in disjE | clarsimp)+)[1] + apply (elim disjE) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_r_into_trancl[where b="Silc_cnode_ptr + 0x27C0"]) + apply (simp add: mdb_next_s0H s0_ptr_defs) + apply (simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_r_into_trancl[where b="Silc_cnode_ptr + 0xA0"]) + apply (simp add: mdb_next_s0H s0_ptr_defs) + apply (simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + apply (rule r_into_trancl, simp add: mdb_next_s0H) + done + +lemma mdb_next_rtrancl_not_0_s0H: 
+ "\ map_to_ctes kh0H \ p \\<^sup>* p'; p' \ 0 \ \ p \ 0" + apply (drule rtranclD) + apply (clarsimp simp: mdb_next_trancl_s0H s0_ptr_defs) + done + +lemma sameRegionAs_s0H: + "\ map_to_ctes kh0H p = Some (CTE cap mdb); map_to_ctes kh0H p' = Some (CTE cap' mdb'); + sameRegionAs cap cap'; p \ p' \ + \ (p = Low_cnode_ptr + 0x27C0 \ (p' = Silc_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0) \ + p = Silc_cnode_ptr + 0x27C0 \ (p' = Low_cnode_ptr + 0x27C0 \ p' = High_cnode_ptr + 0x27C0) \ + p = High_cnode_ptr + 0x27C0 \ (p' = Low_cnode_ptr + 0x27C0 \ p' = Silc_cnode_ptr + 0x27C0) \ + p = Low_cnode_ptr + 0xA0 \ (p' = Silc_cnode_ptr + 0xA0 \ p' = High_cnode_ptr + 0xA0) \ + p = Silc_cnode_ptr + 0xA0 \ (p' = Low_cnode_ptr + 0xA0 \ p' = High_cnode_ptr + 0xA0) \ + p = High_cnode_ptr + 0xA0 \ (p' = Low_cnode_ptr + 0xA0 \ p' = Silc_cnode_ptr + 0xA0) \ + p = Low_tcb_ptr \ p' = Low_cnode_ptr + 0x40 \ + p = Low_cnode_ptr + 0x40 \ p' = Low_tcb_ptr \ + p = Low_tcb_ptr + 0x20 \ p' = Low_cnode_ptr + 0x60 \ + p = Low_cnode_ptr + 0x60 \ p' = Low_tcb_ptr + 0x20 \ + p = High_tcb_ptr \ p' = High_cnode_ptr + 0x40 \ + p = High_cnode_ptr + 0x40 \ p' = High_tcb_ptr \ + p = High_tcb_ptr + 0x20 \ p' = High_cnode_ptr + 0x60 \ + p = High_cnode_ptr + 0x60 \ p' = High_tcb_ptr + 0x20)" + supply option.case_cong[cong] if_cong[cong] s0_ptrs_aligned[simp] + apply (frule_tac x=p in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def isCap_simps)[1] + apply (clarsimp simp: kh0H_all_obj_def' s0_ptr_defs split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' s0_ptr_defs split: if_split_asm) + apply (clarsimp simp: to_bl_use_of_bl the_nat_to_bl_simps kh0H_all_obj_def' ucast_shiftr_2 + split: if_split_asm) + apply ((frule_tac x=p' in map_to_ctes_kh0H_SomeD, + (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps)[1], + ((clarsimp simp: sameRegionAs_def 
RISCV64_H.sameRegionAs_def isCap_simps to_bl_use_of_bl + the_nat_to_bl_simps kh0H_all_obj_def' ucast_shiftr_2 ucast_shiftr_3 + split: if_split_asm)+)[3])+)[5] + apply (clarsimp simp: kh0H_all_obj_def' split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def isCap_simps)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + split: if_split_asm) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' 
to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def isCap_simps)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + split: if_split_asm) + apply (drule(2) ucast_shiftr_6; clarsimp) + apply (drule(2) ucast_shiftr_6; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply 
(frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_4; clarsimp) + apply (drule(2) ucast_shiftr_4; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_3; clarsimp) + apply (drule(2) ucast_shiftr_3; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + 
apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_1; clarsimp) + apply (drule(2) ucast_shiftr_1; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def isCap_simps)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_13E + split: if_split_asm) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (drule(2) ucast_shiftr_13E; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_6; clarsimp) + apply (drule(2) ucast_shiftr_6; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' 
to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps + split: if_split_asm) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (drule(2) ucast_shiftr_5; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_4; clarsimp) + apply (drule(2) ucast_shiftr_4; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_3; clarsimp) + apply (drule(2) ucast_shiftr_3; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' 
to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (drule(2) ucast_shiftr_2; clarsimp) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def isCap_simps + split: if_split_asm)[1] + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps ucast_shiftr_3 + split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (clarsimp simp: kh0H_all_obj_def' to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_1; clarsimp) + apply (drule(2) ucast_shiftr_1; clarsimp) + done + +lemma mdb_prevI: + "m p = Some c \ m \ mdbPrev (cteMDBNode c) \ p" + by (simp add: mdb_prev_def) + +lemma mdb_nextI: + "m p = Some c \ m \ p \ mdbNext (cteMDBNode c)" + by (simp add: mdb_next_unfold) + +lemma s0H_valid_pspace': + notes pteBits_def[simp] objBits_defs[simp] + notes valid_arch_badges_def[simp] mdb_chunked_arch_assms_def[simp] + assumes "1 \ maxDomain" + shows "valid_pspace' s0H_internal" + using assms + supply option.case_cong[cong] if_cong[cong] + apply (clarsimp simp: valid_pspace'_def s0H_pspace_distinct' s0H_valid_objs') + apply (intro conjI) + apply (clarsimp simp: pspace_aligned'_def) + apply (drule kh0H_SomeD) + apply (elim disjE; clarsimp simp: s0_ptr_defs kh0H_all_obj_def objBitsKO_def archObjSize_def + cnode_offs_range_def page_offs_range_def pt_offs_range_def + irq_node_offs_range_def is_aligned_mask mask_def bit_simps + split: if_splits) + apply (clarsimp simp: 
pspace_canonical'_def) + apply (drule kh0H_SomeD') + apply (fastforce elim: dual_order.trans + intro: above_pptr_base_canonical + simp: irq_node_offs_range_def cnode_offs_range_def + pt_offs_range_def page_offs_range_def s0_ptr_defs) + apply (clarsimp simp: pspace_in_kernel_mappings'_def) + apply (clarsimp simp: kernel_mappings_def) + apply (drule kh0H_SomeD') + apply (fastforce elim: dual_order.trans + simp: s0_ptr_defs irq_node_offs_range_def cnode_offs_range_def + pt_offs_range_def page_offs_range_def kernel_mappings_def) + apply (clarsimp simp: no_0_obj'_def) + apply (rule ccontr, clarsimp) + apply (drule kh0H_SomeD) + apply (simp add: irq_node_offs_range_def cnode_offs_range_def + page_offs_range_def pt_offs_range_def s0_ptr_defs) + apply (simp add: valid_mdb'_def) + apply (clarsimp simp: valid_mdb_ctes_def) + apply (intro conjI) + apply (clarsimp simp: valid_dlist_def3) + apply (rule conjI) + apply (clarsimp simp: mdb_next_s0H) + apply (subst mdb_prev_s0H) + apply (fastforce simp: s0_ptr_defs) + apply simp + apply (clarsimp simp: mdb_prev_s0H) + apply (subst mdb_next_s0H) + apply (fastforce simp: s0_ptr_defs) + apply simp + apply (clarsimp simp: no_0_def) + apply (rule ccontr) + apply clarsimp + apply (drule map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: irq_node_offs_range_def + cnode_offs_range_def s0_ptr_defs)+)[1] + apply (clarsimp simp: mdb_chain_0_def) + apply (frule map_to_ctes_kh0H_SomeD) + apply (elim disjE) + apply ((erule r_into_trancl[OF next_fold], clarsimp)+)[5] + apply ((rule r_r_into_trancl[OF next_fold next_fold], simp+)+)[2] + apply ((erule r_into_trancl[OF next_fold], clarsimp)+)[3] + apply ((rule r_r_into_trancl[OF next_fold next_fold], simp+)+)[2] + apply ((erule r_into_trancl[OF next_fold], clarsimp)+)[5] + apply (clarsimp simp: kh0H_all_obj_def Silc_cte_cte_def cnode_offs_range_def + split: if_split_asm) + apply ((rule r_r_into_trancl[OF next_fold next_fold], simp+)+)[2] + apply (erule r_into_trancl[OF next_fold], simp) + 
apply (erule r_into_trancl[OF next_fold], simp) + apply (clarsimp simp: kh0H_all_obj_def High_cte_cte_def cnode_offs_range_def + split: if_split_asm) + apply ((erule r_into_trancl[OF next_fold], clarsimp)+)[2] + apply (rule trancl_into_trancl2[OF next_fold], simp+)[1] + apply (rule r_r_into_trancl[OF next_fold next_fold], simp+)[1] + apply ((erule r_into_trancl[OF next_fold], clarsimp)+)[5] + apply (clarsimp simp: kh0H_all_obj_def Low_cte_cte_def cnode_offs_range_def + split: if_split_asm) + apply (rule trancl_into_trancl2[OF next_fold], simp+)[1] + apply (rule r_r_into_trancl[OF next_fold next_fold], simp+)[1] + apply (erule r_into_trancl[OF next_fold], simp)+ + apply (clarsimp simp: valid_badges_def) + apply (frule_tac x=p in map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: Low_cte_cte_def High_cte_cte_def Silc_cte_cte_def + kh0H_all_obj_def isCap_simps + split: if_split_asm)+)[1] + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: Low_cte_cte_def High_cte_cte_def Silc_cte_cte_def + kh0H_all_obj_def isCap_simps sameRegionAs_def + split: if_split_asm)+)[1] + apply (intro conjI impI) + apply (clarsimp simp: High_cte_cte_def kh0H_all_obj_def isCap_simps split: if_split_asm) + apply (drule(1) sameRegion_ntfn) + apply (clarsimp simp: High_cte_cte_def kh0H_all_obj_def isCap_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: High_cte_cte_def Low_cte_cte_def + Silc_cte_cte_def kh0H_all_obj_def + split: if_split_asm)+)[1] + apply (intro conjI impI) + apply (clarsimp simp: Low_cte_cte_def kh0H_all_obj_def isCap_simps split: if_split_asm) + apply (drule(1) sameRegion_ntfn) + apply (clarsimp simp: Low_cte_cte_def kh0H_all_obj_def isCap_simps split: if_split_asm) + apply (frule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: High_cte_cte_def Low_cte_cte_def + Silc_cte_cte_def kh0H_all_obj_def + split: if_split_asm)+)[1] + apply (clarsimp 
simp: caps_contained'_def) + apply (drule_tac x=p in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all)[1] + apply (clarsimp simp: Silc_cte_cte_def kh0H_all_obj_def split: if_split_asm) + apply (clarsimp simp: High_cte_cte_def kh0H_all_obj_def split: if_split_asm) + apply (clarsimp simp: Low_cte_cte_def kh0H_all_obj_def split: if_split_asm) + apply (clarsimp simp: mdb_chunked_def) + apply (frule(3) sameRegionAs_s0H) + apply (clarsimp simp: conj_disj_distribL) + apply (prop_tac "p \ 0 \ p' \ 0") + apply (elim disjE; clarsimp simp: s0_ptr_defs) + apply (intro conjI; clarsimp) + apply (simp add: mdb_next_trancl_s0H) + apply (elim disjE, simp_all)[1] + apply (thin_tac "_ \ _") + apply (clarsimp simp: mdb_next_trancl_s0H) + apply (elim disjE) + apply ((clarsimp simp: is_chunk_def, + drule mdb_next_rtrancl_not_0_s0H, fastforce simp: s0_ptr_defs, + clarsimp simp: mdb_next_trancl_s0H, + (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def + isCap_simps kh0H_all_obj_def' + , (fastforce simp: s0_ptr_defs)+)[1])+)[10] + apply (thin_tac "_ \ _") + apply (clarsimp simp: mdb_next_trancl_s0H) + apply (elim disjE) + apply ((clarsimp simp: is_chunk_def, + drule mdb_next_rtrancl_not_0_s0H, fastforce simp: s0_ptr_defs, + clarsimp simp: mdb_next_trancl_s0H, + (elim disjE, simp_all add: sameRegionAs_def RISCV64_H.sameRegionAs_def + isCap_simps kh0H_all_obj_def' + , (fastforce simp: s0_ptr_defs)+)[1])+)[10] + apply (clarsimp simp: untyped_mdb'_def) + apply (drule_tac x=p in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: untyped_inc'_def) + apply (rule FalseE) + apply (drule_tac x=p in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: valid_nullcaps_def) + apply (drule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: 
kh0H_all_obj_def' nullMDBNode_def) + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: ut_revocable'_def) + apply (drule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: class_links_def) + apply (subst(asm) mdb_next_s0H) + apply (drule_tac x=p' in map_to_ctes_kh0H_SomeD) + apply (elim disjE, (clarsimp simp: s0_ptr_defs irq_node_offs_range_def cnode_offs_range_def)+)[1] + apply (elim disjE, (clarsimp simp: kh0H_all_obj_def')+)[1] + apply (clarsimp simp: distinct_zombies_def distinct_zombie_caps_def) + apply (drule_tac x=ptr in map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: irq_control_def) + apply (drule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (clarsimp simp: reply_masters_rvk_fb_def ran_def) + apply (frule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: isCap_simps kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + done + +end + + +(* Instantiate the current, abstract domain scheduler into the + concrete scheduler required for this example *) +axiomatization where + newKSDomSched: "newKSDomSchedule = [(0,0xA), (1, 0xA)]" + +axiomatization where + newKSDomainTime: "newKSDomainTime = 5" + +(* kernel_data_refs is an undefined constant at the moment, and therefore + cannot be referred to in valid_global_refs' and pspace_domain_valid. + We use an axiomatization for the moment. *) +axiomatization where + kdr_valid_global_refs': "valid_global_refs' s0H_internal" and + kdr_pspace_domain_valid: "pspace_domain_valid s0H_internal" + + +context begin interpretation Arch . 
+ +lemma ksArchState0H[simp]: + "ksArchState s0H_internal = arch_state0H" + by (simp add: s0H_internal_def) + +lemma valid_arch_state_s0H: + "valid_arch_state' s0H_internal" + apply (clarsimp simp: valid_arch_state'_def) + apply (intro conjI) + apply (clarsimp simp: valid_asid_table'_def s0H_internal_def arch_state0H_def asid_bits_defs + asid_high_bits_of_def Low_asid_def High_asid_def mask_def s0_ptr_defs) + apply (clarsimp simp: valid_global_pts'_def is_aligned_riscv_global_pt_ptr[simplified bit_simps] + arch_state0H_def page_table_at'_def typ_at'_def ko_wp_at'_def + bit_simps global_ptH_def pt_offs_min) + apply (subst objBitsKO_def) + apply (clarsimp simp: archObjSize_def bit_simps) + apply (intro conjI) + apply (fastforce intro: pspace_distinctD'[OF _ s0H_pspace_distinct'] + simp: global_ptH_def pt_offs_min) + apply (clarsimp simp: is_aligned_mask mask_def s0_ptr_defs) + apply word_bitwise + apply (clarsimp simp: s0_ptr_defs) + apply word_bitwise + apply (clarsimp simp: arch_state0H_def) + done + +lemma s0H_invs: + assumes "1 \ maxDomain" + notes pteBits_def[simp] objBits_defs[simp] + shows "invs' s0H_internal" + using assms + supply option.case_cong[cong] if_cong[cong] + supply raw_tcb_cte_cases_simps[simp] (* FIXME arch-split: legacy, try use tcb_cte_cases_neqs *) + apply (clarsimp simp: invs'_def valid_state'_def s0H_valid_pspace') + apply (rule conjI) + apply (clarsimp simp: sch_act_wf_def ct_in_state'_def st_tcb_at'_def obj_at'_def + s0H_internal_def s0_ptrs_aligned objBitsKO_def Low_tcbH_def) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + apply (rule conjI) + apply (clarsimp simp: sym_refs_def state_refs_of'_def refs_of'_def split: option.splits) + apply (frule kh0H_SomeD) + apply (elim disjE, simp_all)[1] + apply (clarsimp simp: ntfnH_def ntfn_q_refs_of'_def) + apply (rule conjI) + apply (clarsimp simp: tcb_st_refs_of'_def High_tcbH_def) + apply (clarsimp simp: objBitsKO_def 
s0_ptrs_aligned) + apply (erule notE, rule pspace_distinctD''[OF _ s0H_pspace_distinct']) + apply (simp add: objBitsKO_def) + apply (clarsimp simp: tcb_st_refs_of'_def Low_tcbH_def) + apply (clarsimp simp: tcb_st_refs_of'_def High_tcbH_def) + apply (rule conjI) + apply (clarsimp simp: ntfnH_def) + apply (clarsimp simp: objBitsKO_def ntfnH_def) + apply (erule impE, simp add: is_aligned_def s0_ptr_defs) + apply (erule notE, rule pspace_distinctD''[OF _ s0H_pspace_distinct']) + apply (simp add: objBitsKO_def ntfnH_def) + apply (clarsimp simp: tcb_st_refs_of'_def idle_tcbH_def) + apply (clarsimp simp: global_ptH_def) + apply (clarsimp simp: Low_ptH_def) + apply (clarsimp simp: High_ptH_def) + apply (clarsimp simp: Low_pdH_def) + apply (clarsimp simp: High_pdH_def) + apply (clarsimp simp: Low_cte_def Low_cte'_def split: if_split_asm) + apply (clarsimp simp: High_cte_def High_cte'_def split: if_split_asm) + apply (clarsimp simp: Silc_cte_def Silc_cte'_def split: if_split_asm) + apply (rule conjI) + apply (clarsimp simp: if_live_then_nonz_cap'_def ko_wp_at'_def) + apply (drule kh0H_SomeD) + apply (elim disjE, simp_all add: kh0H_all_obj_def' objBitsKO_def live'_def hyp_live'_def)[1] + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="Silc_cnode_ptr + 0x27C0" in exI) + apply (clarsimp simp: kh0H_all_obj_def') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="Low_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def') + apply (clarsimp simp: ex_nonz_cap_to'_def cte_wp_at_ctes_of) + apply (rule_tac x="High_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def') + apply (clarsimp split: if_split_asm simp: live'_def hyp_live'_def)+ + apply (rule conjI) + apply (clarsimp simp: if_unsafe_then_cap'_def ex_cte_cap_wp_to'_def cte_wp_at_ctes_of) + apply (frule map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: kh0H_all_obj_def')[1] + apply (rule_tac x="Low_cnode_ptr + 0x20" in exI) + 
apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="Low_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="Low_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="High_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="High_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="High_cnode_ptr + 0x20" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def) + apply (rule_tac x="Silc_cnode_ptr + 0x40" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_13E, rule s0_ptrs_aligned, simp) + apply (rule_tac x="(0x27C0 >> 5)" in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_5, rule s0_ptrs_aligned, simp) + apply (rule_tac x=5 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_2, rule s0_ptrs_aligned, simp) + apply (rule_tac x=2 in bexI) + apply simp + apply (simp add: mask_def) + apply (rule_tac x="High_cnode_ptr + 0x40" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_13E, rule s0_ptrs_aligned, simp) + apply (rule_tac x="0x13E" in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_6, rule s0_ptrs_aligned, simp) + apply (rule_tac x=6 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_5, rule s0_ptrs_aligned, simp) + apply (rule_tac x=5 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_4, rule s0_ptrs_aligned, simp) + apply (rule_tac x=4 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_3, rule s0_ptrs_aligned, simp) + apply (rule_tac x=3 in bexI) + apply simp + apply (simp add: mask_def) 
+ apply (drule(2) ucast_shiftr_2, rule s0_ptrs_aligned, simp) + apply (rule_tac x=2 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_1, rule s0_ptrs_aligned, simp) + apply (rule_tac x=1 in bexI) + apply simp + apply (simp add: mask_def) + apply (rule_tac x="Low_cnode_ptr + 0x40" in exI) + apply (clarsimp simp: kh0H_all_obj_def' image_def to_bl_use_of_bl the_nat_to_bl_simps split: if_split_asm) + apply (drule(2) ucast_shiftr_13E, rule s0_ptrs_aligned, simp) + apply (rule_tac x="0x13E" in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_6, rule s0_ptrs_aligned, simp) + apply (rule_tac x=6 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_5, rule s0_ptrs_aligned, simp) + apply (rule_tac x=5 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_4, rule s0_ptrs_aligned, simp) + apply (rule_tac x=4 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_3, rule s0_ptrs_aligned, simp) + apply (rule_tac x=3 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_2, rule s0_ptrs_aligned, simp) + apply (rule_tac x=2 in bexI) + apply simp + apply (simp add: mask_def) + apply (drule(2) ucast_shiftr_1, rule s0_ptrs_aligned, simp) + apply (rule_tac x=1 in bexI) + apply simp + apply (simp add: mask_def) + apply (rule conjI) + apply (clarsimp simp: valid_idle'_def pred_tcb_at'_def obj_at'_def objBitsKO_def idle_tcb'_def) + apply (clarsimp simp: s0H_internal_def s0_ptrs_aligned idle_tcbH_def) + apply (rule conjI) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + apply (clarsimp simp: idle_tcb_ptr_def idle_thread_ptr_def) + apply (rule conjI) + apply (clarsimp simp: kdr_valid_global_refs') (* use axiomatization for now *) + apply (rule conjI) + apply (clarsimp simp: valid_arch_state_s0H) + apply (rule conjI) + apply (clarsimp simp: 
valid_irq_node'_def) + apply (rule conjI) + apply (clarsimp simp: s0H_internal_def is_aligned_def s0_ptr_defs word_size) + apply (clarsimp simp: obj_at'_def objBitsKO_def s0H_internal_def + shiftl_t2n[where n=4, simplified, symmetric]) + apply (rule conjI) + apply (rule is_aligned_add) + apply (simp add: is_aligned_def s0_ptr_defs) + apply (rule is_aligned_shift) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + apply (rule conjI) + apply (clarsimp simp: valid_irq_handlers'_def cteCaps_of_def ran_def) + apply (drule_tac map_to_ctes_kh0H_SomeD) + apply (elim disjE, simp_all add: kh0H_all_obj_def')[1] + apply ((clarsimp split: if_split_asm)+)[3] + apply (rule conjI) + apply (clarsimp simp: valid_irq_states'_def s0H_internal_def machine_state0_def) + apply (rule conjI) + apply (clarsimp simp: valid_machine_state'_def s0H_internal_def machine_state0_def) + apply (rule conjI) + apply (clarsimp simp: irqs_masked'_def s0H_internal_def maxIRQ_def timer_irq_def irqInvalid_def) + apply (rule conjI) + apply (clarsimp simp: sym_heap_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_sched_pointers_def opt_map_def projectKOs split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (rule conjI) + apply (clarsimp simp: valid_bitmaps_def valid_bitmapQ_def bitmapQ_def s0H_internal_def + tcbQueueEmpty_def bitmapQ_no_L1_orphans_def bitmapQ_no_L2_orphans_def) + apply (rule conjI) + apply (clarsimp simp: ct_not_inQ_def obj_at'_def objBitsKO_def + s0H_internal_def s0_ptrs_aligned Low_tcbH_def) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + apply (rule conjI) + apply (clarsimp simp: ct_idle_or_in_cur_domain'_def obj_at'_def tcb_in_cur_domain'_def + s0H_internal_def Low_tcbH_def Low_domain_def 
objBitsKO_def s0_ptrs_aligned) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + apply (rule conjI) + apply (clarsimp simp: kdr_pspace_domain_valid) (* use axiomatization for now *) + apply (clarsimp simp: s0H_internal_def cteCaps_of_def untyped_ranges_zero_inv_def + dschDomain_def dschLength_def) + apply (clarsimp simp: newKernelState_def newKSDomSched) + apply (clarsimp simp: cur_tcb'_def obj_at'_def s0H_internal_def objBitsKO_def s0_ptrs_aligned) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBitsKO_def) + done + +lemma kh0_pspace_dom: + "pspace_dom kh0 = {idle_tcb_ptr, High_tcb_ptr, Low_tcb_ptr, + High_pool_ptr, Low_pool_ptr, irq_cnode_ptr, ntfn_ptr} \ + irq_node_offs_range \ + page_offs_range shared_page_ptr_virt \ + cnode_offs_range Silc_cnode_ptr \ + cnode_offs_range High_cnode_ptr \ + cnode_offs_range Low_cnode_ptr \ + pt_offs_range riscv_global_pt_ptr \ + pt_offs_range High_pd_ptr \ + pt_offs_range Low_pd_ptr \ + pt_offs_range High_pt_ptr \ + pt_offs_range Low_pt_ptr" + apply (rule equalityI) + apply (simp add: dom_def pspace_dom_def) + apply clarsimp + apply (clarsimp simp: kh0_def obj_relation_cuts_def page_offs_in_range pt_offs_in_range + cnode_offs_in_range irq_node_offs_in_range s0_ptrs_aligned bit_simps + kh0_obj_def cte_map_def' caps_dom_length_10 + dest!: less_0x200_exists_ucast split: if_split_asm) + apply (clarsimp simp: pspace_dom_def dom_def) + apply (rule conjI) + apply (rule_tac x=idle_tcb_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply (rule_tac x=High_tcb_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply (rule_tac x=Low_tcb_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply (rule_tac x=High_pool_ptr in exI) + apply (clarsimp simp: 
kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply (rule_tac x=Low_pool_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply (rule_tac x=irq_cnode_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def cte_map_def) + apply (rule conjI) + apply (rule_tac x=ntfn_ptr in exI) + apply (clarsimp simp: kh0_def kh0_obj_def s0_ptr_defs image_def) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=x in exI) + apply (drule offs_range_correct) + apply clarsimp + apply (force simp: kh0_def kh0_obj_def image_def cte_map_def') + apply (rule conjI) + apply clarsimp + apply (rule_tac x=shared_page_ptr_virt in exI) + apply (drule offs_range_correct) + apply (clarsimp simp: kh0_def kh0_obj_def image_def s0_ptr_defs cte_map_def' dom_caps bit_simps) + apply (rule_tac x="UCAST (9 \ 64) y" in exI) + apply clarsimp + apply word_bitwise + apply (rule conjI) + apply clarsimp + apply (rule_tac x=Silc_cnode_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs cte_map_def' dom_caps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=High_cnode_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs cte_map_def' dom_caps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=Low_cnode_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs cte_map_def' dom_caps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=riscv_global_pt_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs bit_simps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=High_pd_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs bit_simps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=Low_pd_ptr in exI) + 
apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs bit_simps) + apply (rule conjI) + apply clarsimp + apply (rule_tac x=High_pt_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs bit_simps) + apply clarsimp + apply (rule_tac x=Low_pt_ptr in exI) + apply (drule offs_range_correct) + apply (force simp: kh0_def kh0_obj_def image_def s0_ptr_defs bit_simps) + done + + +lemma shiftl_shiftr_3_pt_index[simp]: + "ucast (((ucast (x :: pt_index) << 3) :: obj_ref) >> 3) = x" + apply (subst shiftl_shiftr_id) + apply simp + apply (cut_tac ucast_less[where x=x]) + apply (erule less_trans) + apply simp + apply simp + apply (rule ucast_ucast_id) + apply simp + done + +lemma mult_shiftr_id[simp]: + "length x = 10 \ of_bl x * (0x20 :: obj_ref) >> 5 = of_bl x" + apply (simp add: shiftl_t2n[symmetric, where n=5, simplified mult.commute, simplified] ) + apply (subst shiftl_shiftr_id) + apply simp + apply (rule less_trans) + apply (rule of_bl_length_less) + apply assumption + apply simp + apply simp + apply simp + done + +lemma to_bl_ucast_of_bl[simp]: + "length x = 10 \ to_bl ((ucast (of_bl x :: obj_ref)) :: 10 word) = x" + apply (subst ucast_of_bl_up) + apply (simp add: word_size) + apply (simp add: word_rep_drop) + done + +lemma is_aligned_shiftr_3[simp]: + "is_aligned (riscv_global_pt_ptr + (n << 3)) 3" + "is_aligned (High_pd_ptr + (n << 3)) 3" + "is_aligned (Low_pd_ptr + (n << 3)) 3" + "is_aligned (High_pt_ptr + (n << 3)) 3" + "is_aligned (Low_pt_ptr + (n << 3)) 3" + apply (rule is_aligned_add[OF is_aligned_weaken[OF s0_ptrs_aligned(1), where y=3, simplified bit_simps]]; fastforce) + apply (rule is_aligned_add[OF is_aligned_weaken[OF s0_ptrs_aligned(2), where y=3, simplified bit_simps]]; fastforce) + apply (rule is_aligned_add[OF is_aligned_weaken[OF s0_ptrs_aligned(3), where y=3, simplified bit_simps]]; fastforce) + apply (rule is_aligned_add[OF is_aligned_weaken[OF 
s0_ptrs_aligned(4), where y=3, simplified bit_simps]]; fastforce) + apply (rule is_aligned_add[OF is_aligned_weaken[OF s0_ptrs_aligned(5), where y=3, simplified bit_simps]]; fastforce) + done + +lemma s0_pspace_rel: + "pspace_relation (kheap s0_internal) kh0H" + apply (simp add: pspace_relation_def s0_internal_def s0H_internal_def kh0H_dom kh0_pspace_dom) + apply clarsimp + apply (drule kh0_SomeD) + apply (elim disjE) + apply (clarsimp simp: kh0_obj_def bit_simps dest!: less_0x200_exists_ucast) + defer + apply ((clarsimp simp: kh0_obj_def kh0H_obj_def bit_simps word_bits_def + fault_rel_optionation_def tcb_relation_cut_def + tcb_relation_def arch_tcb_relation_def the_nat_to_bl_simps + split del: if_split)+)[3] + prefer 13 + apply ((clarsimp simp: kh0_obj_def kh0H_all_obj_def bit_simps add.commute + pt_offs_max pt_offs_min pte_relation_def + split del: if_split, + clarsimp simp: s0_ptr_defs shared_page_ptr_phys_def addrFromPPtr_def pptrBaseOffset_def paddrBase_def + vmrights_map_def vm_read_only_def vm_read_write_def + kh0_obj_def kh0H_all_obj_def elf_index_value, + (clarsimp simp: bit_simps mask_def)?)+)[5] + apply (clarsimp simp: kh0_obj_def kh0H_obj_def well_formed_cnode_n_def + cte_relation_def cte_map_def bit_simps) + apply (clarsimp simp: kh0H_obj_def bit_simps ntfn_def other_obj_relation_def ntfn_relation_def) + defer + apply ((clarsimp simp: kh0_obj_def kh0H_obj_def other_obj_relation_def + asid_pool_relation_def comp_def inv_into_def2 other_aobj_relation_def, + rule ext, + clarsimp simp: asid_low_bits_of_def asid_low_bits_def High_asid_def, + word_bitwise, fastforce)+)[2] + apply (clarsimp simp: kh0H_obj_def kh0_obj_def cte_relation_def cte_map_def') + apply (cut_tac dom_caps(2))[1] + apply (frule_tac m=High_caps in domI) + apply (cut_tac x=y in cnode_offs_in_range(2), simp) + apply (clarsimp simp: cnode_offs_range_def kh0H_all_obj_def High_caps_def + the_nat_to_bl_simps vmrights_map_def vm_read_only_def + split: if_split_asm) + apply (clarsimp simp: 
kh0H_obj_def kh0_obj_def cte_relation_def cte_map_def') + apply (cut_tac dom_caps(3))[1] + apply (frule_tac m=Low_caps in domI) + apply (cut_tac x=y in cnode_offs_in_range(1), simp) + apply (clarsimp simp: cnode_offs_range_def kh0H_all_obj_def Low_caps_def + the_nat_to_bl_simps vmrights_map_def vm_read_write_def + split: if_split_asm) + apply (fastforce simp: kh0H_obj_def cte_map_def cte_relation_def well_formed_cnode_n_def + dest: irq_node_offs_range_correct split: if_split_asm) + apply (clarsimp simp: kh0H_obj_def kh0_obj_def cte_relation_def cte_map_def') + apply (cut_tac dom_caps(1))[1] + apply (frule_tac m=Silc_caps in domI) + apply (cut_tac x=y in cnode_offs_in_range(3), simp) + apply (clarsimp simp: cnode_offs_range_def kh0H_all_obj_def Silc_caps_def + the_nat_to_bl_simps vmrights_map_def vm_read_only_def + split: if_split_asm) + done + +lemma subtree_node_Some: + "m \ a \ b \ m a \ None" + by (erule subtree.cases) (auto simp: parentOf_def) + +lemma s0_srel: + "1 \ maxDomain \ (s0_internal, s0H_internal) \ state_relation" + apply (simp add: state_relation_def) + apply (intro conjI) + apply (simp add: s0_pspace_rel) + apply (simp add: s0_internal_def exst0_def s0H_internal_def sched_act_relation_def) + apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def + ready_queues_relation_def ready_queue_relation_def + list_queue_relation_def queue_end_valid_def + prev_queue_head_def inQ_def tcbQueueEmpty_def + projectKOs opt_map_def opt_pred_def + split: option.splits) + using kh0H_dom_tcb + apply (fastforce simp: kh0H_obj_def) + apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def ghost_relation_def) + apply (rule conjI) + apply (fastforce simp: kh0_def kh0_obj_def dest: kh0_SomeD) + apply clarsimp + apply (rule conjI) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: irq_node_offs_in_range) + apply (fastforce simp: kh0_def well_formed_cnode_n_def empty_cnode_def dom_def) + apply clarsimp + 
apply (clarsimp simp: s0_ptr_defs) + apply (subgoal_tac "a \ irq_node_offs_range") + prefer 2 + apply (clarsimp simp: irq_node_offs_range_def s0_ptr_defs) + apply (erule_tac x="ucast (a - 0xFFFFFFC000003000 >> 5)" in allE) + apply (subst (asm) ucast_ucast_len) + apply (rule shiftr_less_t2n) + apply (rule word_less_sub_right) + apply (erule dual_order.strict_trans[rotated], clarsimp) + apply clarsimp + apply (simp add: shiftr_shiftl1) + apply (subst(asm) is_aligned_neg_mask_eq) + apply (rule aligned_sub_aligned[where n=5]) + apply simp + apply (simp add: is_aligned_def) + apply simp + apply simp + apply (intro conjI impI) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: s0_ptr_defs kh0_obj_def) + apply (fastforce simp: kh0_def kh0_obj_def dom_def s0_ptr_defs Low_caps_def + well_formed_cnode_n_def empty_cnode_def) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: s0_ptr_defs kh0_obj_def) + apply (fastforce simp: kh0_def kh0_obj_def dom_def s0_ptr_defs High_caps_def + well_formed_cnode_n_def empty_cnode_def) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: s0_ptr_defs kh0_obj_def) + apply (fastforce simp: kh0_def kh0_obj_def dom_def s0_ptr_defs Silc_caps_def + well_formed_cnode_n_def empty_cnode_def) + apply clarsimp + apply (rule iffI) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: s0_ptr_defs kh0_obj_def) + apply (fastforce simp: kh0_def kh0_obj_def dom_def s0_ptr_defs Silc_caps_def + well_formed_cnode_n_def empty_cnode_def) + apply clarsimp + apply (drule kh0_SomeD) + apply (clarsimp simp: s0_ptr_defs kh0_obj_def) + apply (clarsimp simp: s0H_internal_def cdt_relation_def) + apply (clarsimp simp: descendants_of'_def) + apply (frule subtree_parent) + apply (drule subtree_mdb_next) + apply (case_tac "x = 0") + apply (cut_tac s0H_valid_pspace') + apply (simp add: valid_pspace'_def 
valid_mdb'_def valid_mdb_ctes_def + parentOf_def isMDBParentOf_def kh0H_all_obj_def') + apply simp + apply (clarsimp simp: mdb_next_trancl_s0H) + apply (elim disjE, (clarsimp simp: parentOf_def isMDBParentOf_def kh0H_all_obj_def')+)[1] + apply (clarsimp simp: cdt_list_relation_def s0_internal_def exst0_def split: option.splits) + apply (clarsimp simp: next_slot_def) + apply (cut_tac p="(a, b)" and t="(const [])" and m="Map.empty" in next_not_child_NoneI) + apply fastforce + apply (simp add: next_sib_def) + apply (simp add: finite_depth_def) + apply simp + apply (clarsimp simp: revokable_relation_def) + apply (clarsimp simp: null_filter_def split: if_split_asm) + apply (drule s0_caps_of_state) + apply clarsimp + apply (elim disjE) + apply (clarsimp simp: s0H_internal_def s0_internal_def + cte_map_def' kh0H_all_obj_def' + split: if_split_asm)+ + apply (clarsimp simp: tcb_cnode_index_def ucast_bl[symmetric] + Low_tcb_cte_def Low_tcbH_def High_tcb_cte_def High_tcbH_def) + apply ((clarsimp simp: cte_map_def' s0H_internal_def s0_internal_def, + clarsimp simp: tcb_cnode_index_def ucast_bl[symmetric] + Low_tcb_cte_def Low_tcbH_def High_tcb_cte_def High_tcbH_def)+)[5] + apply (clarsimp simp: s0_internal_def s0H_internal_def arch_state0_def arch_state0H_def + arch_state_relation_def asid_high_bits_of_def asid_low_bits_def + High_asid_def Low_asid_def max_pt_level_def2 maxPTLevel_def comp_def + split: if_splits) + apply safe + apply (rule ext) + apply clarsimp + apply (word_bitwise, fastforce) + apply (rule ext) + apply (clarsimp split: if_splits) + subgoal for level + by (induct level; simp only: size_maxPTLevel[simplified maxPTLevel_def, symmetric] + bit0.size_inj max_pt_level_def2) + apply (clarsimp simp: s0_internal_def s0H_internal_def exst0_def cte_level_bits_def + interrupt_state_relation_def irq_state_relation_def) + apply (clarsimp simp: s0_internal_def exst0_def s0H_internal_def)+ + done + +definition + "s0H \ ((if ct_idle' s0H_internal then idle_context s0_internal 
else s0_context, s0H_internal), KernelExit)" + +lemma step_restrict_s0: + "1 \ maxDomain \ step_restrict s0" + supply option.case_cong[cong] if_cong[cong] + apply (clarsimp simp: step_restrict_def has_srel_state_def) + apply (rule_tac x="fst (fst s0H)" in exI) + apply (rule_tac x="snd (fst s0H)" in exI) + apply (rule_tac x="snd s0H" in exI) + apply (simp add: s0H_def lift_fst_rel_def lift_snd_rel_def s0_srel s0_def split del: if_split) + apply (rule conjI) + apply (clarsimp split: if_split_asm) + apply (rule conjI) + apply clarsimp + apply (frule ct_idle'_related[OF s0_srel s0H_invs]; solves simp) + apply clarsimp + apply (drule ct_idle_related[OF s0_srel]; simp) + apply (clarsimp simp: full_invs_if'_def s0H_invs) + apply (rule conjI) + apply (simp only: ex_abs_def) + apply (rule_tac x="s0_internal" in exI) + apply (simp only: einvs_s0 s0_srel) + apply (simp add: s0H_internal_def valid_domain_list'_def) + apply (clarsimp simp: ct_in_state'_def st_tcb_at'_def obj_at'_def + s0H_internal_def objBits_simps' s0_ptrs_aligned Low_tcbH_def) + apply (rule pspace_distinctD''[OF _ s0H_pspace_distinct', simplified s0H_internal_def]) + apply (simp add: objBits_simps') + done + +lemma Sys1_valid_initial_state_noenabled: + assumes domains: "1 \ maxDomain" + assumes utf_det: "\pl pr pxn tc ms s. det_inv InUserMode tc s \ einvs s \ context_matches_state pl pr pxn ms s \ ct_running s + \ (\x. utf (cur_thread s) pl pr pxn (tc, ms) = {x})" + assumes utf_non_empty: "\t pl pr pxn tc ms. utf t pl pr pxn (tc, ms) \ {}" + assumes utf_non_interrupt: "\t pl pr pxn tc ms e f g. 
(e,f,g) \ utf t pl pr pxn (tc, ms) \ e \ Some Interrupt" + assumes det_inv_invariant: "invariant_over_ADT_if det_inv utf" + assumes det_inv_s0: "det_inv KernelExit (cur_context s0_internal) s0_internal" + shows "valid_initial_state_noenabled det_inv utf s0_internal Sys1PAS timer_irq s0_context" + by (rule Sys1_valid_initial_state_noenabled[OF step_restrict_s0 utf_det utf_non_empty + utf_non_interrupt det_inv_invariant det_inv_s0 + ], + rule domains) + +end + +end diff --git a/run_tests b/run_tests index ea46e44a52..3512a7b841 100755 --- a/run_tests +++ b/run_tests @@ -40,7 +40,9 @@ EXCLUDE["ARM_HYP"]=[ "RefineOrphanage", "SimplExportAndRefine"] -EXCLUDE["ARM"]=[] +EXCLUDE["ARM"]=[ + "InfoFlow" # for development only +] EXCLUDE["X64"]=[ "Access", @@ -54,6 +56,7 @@ EXCLUDE["X64"]=[ ] EXCLUDE["RISCV64"]=[ + "InfoFlow", # for development only "AutoCorresSEL4", "DSpec", "DBaseRefine", @@ -64,7 +67,7 @@ EXCLUDE["RISCV64"]=[ EXCLUDE["AARCH64"]=[ # To be eliminated/refined as development progresses "ASepSpec", - "InfoFlow", + "InfoFlowCBase", # Tools and unrelated content, removed for development "AutoCorres",