diff --git a/Analysis.thy b/Analysis.thy new file mode 100644 index 0000000..043ddc2 --- /dev/null +++ b/Analysis.thy @@ -0,0 +1,210 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * UPF + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: Analysis.thy 10953 2014-11-24 11:23:40Z wolff $ *) + + +header{* Properties on Policies *} +theory + Analysis +imports + ParallelComposition + SeqComposition +begin + +text {* + In this theory, several standard policy properties are paraphrased in UPF terms. +*} + +section{* Basic Properties *} + +subsection{* A Policy Has no Gaps *} + +definition gap_free :: "('a \ 'b) \ bool" +where "gap_free p = (dom p = UNIV)" + +subsection{* Comparing Policies *} +text {* Policy p is more defined than q: *} +definition more_defined :: "('a \ 'b) \('a \ 'b) \bool" +where "more_defined p q = (dom q \ dom p)" + + +definition strictly_more_defined :: "('a \ 'b) \('a \ 'b) \bool" +where "strictly_more_defined p q = (dom q \ dom p)" + +lemma strictly_more_vs_more: "strictly_more_defined p q \ more_defined p q" +unfolding more_defined_def strictly_more_defined_def +by auto + +text{* Policy p is more permissive than q: *} +definition more_permissive :: "('a \ 'b) \ ('a \ 'b) \ bool" (infixl "\\<^sub>A" 60) +where " p \\<^sub>A q = (\ x. (case q x of \allow y\ \ (\ z. 
(p x = \allow z\)) + | \deny y\ \ True + | \ \ True))" + + +lemma more_permissive_refl : "p \\<^sub>A p " +unfolding more_permissive_def +by(auto split : option.split decision.split) + +lemma more_permissive_trans : "p \\<^sub>A p' \ p' \\<^sub>A p'' \ p \\<^sub>A p''" +unfolding more_permissive_def +apply(auto split : option.split decision.split) +apply(erule_tac x = x and + P = "\x. case p'' x of \ \ True + | \allow y\ \ \z. p' x = \allow z\ + | \deny y\ \ True" in allE) +apply(simp, elim exE) +by(erule_tac x = x in allE, simp) + + +text{* Policy p is more rejective than q: *} +definition more_rejective :: "('a \ 'b) \ ('a \ 'b) \ bool" (infixl "\\<^sub>D" 60) +where " p \\<^sub>D q = (\ x. (case q x of \deny y\ \ (\ z. (p x = \deny z\)) + | \allow y\ \ True + | \ \ True))" + + +lemma more_rejective_trans : "p \\<^sub>D p' \ p' \\<^sub>D p'' \ p \\<^sub>D p''" +unfolding more_rejective_def +apply(auto split : option.split decision.split) +apply(erule_tac x = x and + P = "\x. case p'' x of \ \ True + | \allow y\ \ True + | \deny y\ \ \z. p' x = \deny z\" in allE) +apply(simp, elim exE) +by(erule_tac x = x in allE, simp) + + + +lemma more_rejective_refl : "p \\<^sub>D p " +unfolding more_rejective_def +by(auto split : option.split decision.split) + + +lemma "A\<^sub>f f \\<^sub>A p" + unfolding more_permissive_def allow_all_fun_def allow_pfun_def + by(auto split: option.split decision.split) + +lemma "A\<^sub>I \\<^sub>A p" + unfolding more_permissive_def allow_all_fun_def allow_pfun_def allow_all_id_def + by(auto split: option.split decision.split) + +section{* Combined Data-Policy Refinement *} + +definition policy_refinement :: + "('a \ 'b) \ ('a' \ 'a) \('b' \ 'b) \ ('a' \ 'b') \ bool" + ("_ \\<^bsub>_\<^esub>\<^sub>,\<^bsub>_\<^esub> _" [50,50,50,50]50) +where "p \\<^bsub>abs\<^sub>a\<^esub>\<^sub>,\<^bsub>abs\<^sub>b\<^esub> q \ + (\ a. case p a of + \ \ True + | \allow y\ \ (\ a'\{x. abs\<^sub>a x=a}. + \ b'. 
q a' = \allow b'\ + \ abs\<^sub>b b' = y) + | \deny y\ \ (\ a'\{x. abs\<^sub>a x=a}. + \ b'. q a' = \deny b'\ + \ abs\<^sub>b b' = y))" + +theorem polref_refl: "p \\<^bsub>id\<^esub>\<^sub>,\<^bsub>id\<^esub> p" +unfolding policy_refinement_def +by(auto split: option.split decision.split) + +theorem polref_trans: +assumes A: "p \\<^bsub>f\<^esub>\<^sub>,\<^bsub>g\<^esub> p'" +and B: "p' \\<^bsub>f'\<^esub>\<^sub>,\<^bsub>g'\<^esub> p''" +shows "p \\<^bsub>f o f'\<^esub>\<^sub>,\<^bsub>g o g'\<^esub> p''" +apply(insert A B) +unfolding policy_refinement_def +apply(auto split: option.split decision.split simp: o_def) +apply(erule_tac x="f (f' a')" in allE, simp) +apply(erule_tac x="f' a'" in allE, auto) +apply(erule_tac x=" (f' a')" in allE, auto) +apply(erule_tac x="f (f' a')" in allE, simp) +apply(erule_tac x="f' a'" in allE, auto) +apply(erule_tac x=" (f' a')" in allE, auto) +done + + +section {* Equivalence of Policies *} +subsubsection{* Equivalence over domain D *} + +definition p_eq_dom :: "('a \ 'b) \ 'a set \ ('a \ 'b) \bool" ("_ \\<^bsub>_\<^esub> _" [60,60,60]60) +where "p \\<^bsub>D\<^esub> q = (\x\D. p x = q x)" + +text{* p and q have no conflicts *} +definition no_conflicts :: "('a \ 'b) \('a \ 'b) \bool" where + "no_conflicts p q = (dom p = dom q \ (\x\(dom p). + (case p x of \allow y\ \ (\z. q x = \allow z\) + | \deny y\ \ (\z. 
q x = \deny z\))))" + +lemma policy_eq: + assumes p_over_qA: "p \\<^sub>A q " + and q_over_pA: "q \\<^sub>A p" + and p_over_qD: "q \\<^sub>D p" + and q_over_pD: "p \\<^sub>D q" + and dom_eq: "dom p = dom q" +shows "no_conflicts p q" + apply (insert p_over_qA q_over_pA p_over_qD q_over_pD dom_eq) + apply (simp add: no_conflicts_def more_permissive_def more_rejective_def + split: option.splits decision.splits) + apply (safe) + apply (metis domI domIff dom_eq) + apply (metis)+ +done + +subsection{* Miscellaneous *} + +lemma dom_inter: "\dom p \ dom q = {}; p x = \y\\ \ q x = \" + by (auto) + +lemma dom_eq: "dom p \ dom q = {} \ p \\<^sub>A q = p \\<^sub>D q" + unfolding override_A_def override_D_def + by (rule ext, auto simp: dom_def split: prod.splits option.splits decision.splits ) + +lemma dom_split_alt_def : "(f, g) \ p = (dom(p \ Allow) \ (A\<^sub>f f)) \ (dom(p \ Deny) \ (D\<^sub>f g))" + apply (rule ext) + apply (simp add: dom_split2_def Allow_def Deny_def dom_restrict_def + deny_all_fun_def allow_all_fun_def map_add_def) + apply (simp split: option.splits decision.splits) + apply (auto simp: map_add_def o_def deny_pfun_def ran_restrict_def image_def) +done + +end diff --git a/ElementaryPolicies.thy b/ElementaryPolicies.thy new file mode 100644 index 0000000..c64ffe4 --- /dev/null +++ b/ElementaryPolicies.thy @@ -0,0 +1,338 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * UPF + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: ElementaryPolicies.thy 10945 2014-11-21 12:50:43Z wolff $ *) + +header{* Elementary Policies *} +theory + ElementaryPolicies +imports + UPFCore +begin +text{* + In this theory, we introduce the elementary policies of UPF that build the basis + for more complex policies. 
These complex policies, respectively, embedding of + well-known access control or security models, are build by composing the elementary + policies defined in this theory. +*} + +section{* The Core Policy Combinators: Allow and Deny Everything *} + +definition + deny_pfun :: "('\ \'\) \ ('\ \ '\)" ("AllD") + where + "deny_pfun pf \ (\ x. case pf x of + \y\ \ \deny (y)\ + |\ \ \)" + +definition + allow_pfun :: "('\ \'\) \ ('\ \ '\)" ( "AllA") + where + "allow_pfun pf \ (\ x. case pf x of + \y\ \ \allow (y)\ + |\ \ \)" + +syntax (xsymbols) + "_allow_pfun" :: "('\ \'\) \ ('\ \ '\)" ("A\<^sub>p") +translations + "A\<^sub>p f" \ "AllA f" + +syntax (xsymbols) + "_deny_pfun" :: "('\ \'\) \ ('\ \ '\)" ("D\<^sub>p") +translations + "D\<^sub>p f" \ "AllD f" + +notation (xsymbols) + "deny_pfun" (binder "\D" 10) and + "allow_pfun" (binder "\A" 10) + +lemma AllD_norm[simp]: "deny_pfun (id o (\x. \x\)) = (\Dx. \x\)" + by(simp add:id_def comp_def) + +lemma AllD_norm2[simp]: "deny_pfun (Some o id) = (\Dx. \x\)" + by(simp add:id_def comp_def) + +lemma AllA_norm[simp]: "allow_pfun (id o Some) = (\Ax. \x\)" + by(simp add:id_def comp_def) + +lemma AllA_norm2[simp]: "allow_pfun (Some o id) = (\Ax. \x\)" + by(simp add:id_def comp_def) + +lemma AllA_apply[simp]: "(\Ax. Some (P x)) x = \allow (P x)\" + by(simp add:allow_pfun_def) + +lemma AllD_apply[simp]: "(\Dx. 
Some (P x)) x = \deny (P x)\" + by(simp add:deny_pfun_def) + +lemma neq_Allow_Deny: "pf \ \ \ (deny_pfun pf) \ (allow_pfun pf)" + apply (erule contrapos_nn) + apply (rule ext) + apply (drule_tac x=x in fun_cong) + apply (auto simp: deny_pfun_def allow_pfun_def) + apply (case_tac "pf x = \") + apply (auto) +done + +section{* Common Instances *} + +definition allow_all_fun :: "('\ \ '\) \ ('\ \ '\)" ("A\<^sub>f") + where "allow_all_fun f = allow_pfun (Some o f)" + +definition deny_all_fun :: "('\ \ '\) \ ('\ \ '\)" ("D\<^sub>f") + where "deny_all_fun f \ deny_pfun (Some o f)" + +definition + deny_all_id :: "'\ \ '\" ("D\<^sub>I") where + "deny_all_id \ deny_pfun (id o Some)" + +definition + allow_all_id :: "'\ \ '\" ("A\<^sub>I") where + "allow_all_id \ allow_pfun (id o Some)" + +definition + allow_all :: "('\ \ unit)" ("A\<^sub>U") where + "allow_all p = \allow ()\" + +definition + deny_all :: "('\ \ unit)" ("D\<^sub>U") where + "deny_all p = \deny ()\" + +text{* ... and resulting properties: *} + +lemma "A\<^sub>I \ empty = A\<^sub>I" + apply simp + done + +lemma "A\<^sub>f f \ empty = A\<^sub>f f" + apply simp + done + +lemma "allow_pfun empty = empty" + apply (rule ext) + apply (simp add: allow_pfun_def) + done + +lemma allow_left_cancel :"dom pf = UNIV \ (allow_pfun pf) \ x = (allow_pfun pf)" + apply (rule ext)+ + apply (auto simp: allow_pfun_def option.splits) + done + + +lemma deny_left_cancel :"dom pf = UNIV \ (deny_pfun pf) \ x = (deny_pfun pf)" + apply (rule ext)+ + apply (auto simp: deny_pfun_def option.splits) + done + +section{* Domain, Range, and Restrictions *} + +text{* + Since policies are essentially maps, we inherit the basic definitions for + domain and range on Maps: \\ + \verb+Map.dom_def+ : @{thm Map.dom_def} \\ + whereas range is just an abrreviation for image: + \begin{verbatim} + abbreviation range :: "('a => 'b) => 'b set" + where -- "of function" "range f == f ` UNIV" + \end{verbatim} + As a consequence, we inherit the following properties 
on + policies: + \begin{itemize} + \item \verb+Map.domD+ @{thm Map.domD} + \item\verb+Map.domI+ @{thm Map.domI} + \item\verb+Map.domIff+ @{thm Map.domIff} + \item\verb+Map.dom_const+ @{thm Map.dom_const} + \item\verb+Map.dom_def+ @{thm Map.dom_def} + \item\verb+Map.dom_empty+ @{thm Map.dom_empty} + \item\verb+Map.dom_eq_empty_conv+ @{thm Map.dom_eq_empty_conv} + \item\verb+Map.dom_eq_singleton_conv+ @{thm Map.dom_eq_singleton_conv} + \item\verb+Map.dom_fun_upd+ @{thm Map.dom_fun_upd} + \item\verb+Map.dom_if+ @{thm Map.dom_if} + \item\verb+Map.dom_map_add+ @{thm Map.dom_map_add} + \end{itemize} +*} + +text{* + However, some properties are specific to policy concepts: +*} +lemma sub_ran : "ran p \ Allow \ Deny" + apply (auto simp: Allow_def Deny_def ran_def full_SetCompr_eq[symmetric]) + apply (case_tac "x") + apply (simp_all) + apply (erule_tac x="\" in allE) + apply (simp) + done + +lemma dom_allow_pfun [simp]:"dom(allow_pfun f) = dom f" + apply (auto simp: allow_pfun_def) + apply (case_tac "f x", simp_all) + done + +lemma dom_allow_all: "dom(A\<^sub>f f) = UNIV" + by(auto simp: allow_all_fun_def o_def) + +lemma dom_deny_pfun [simp]:"dom(deny_pfun f) = dom f" + apply (auto simp: deny_pfun_def) + apply (case_tac "f x") + apply (simp_all) + done + +lemma dom_deny_all: " dom(D\<^sub>f f) = UNIV" + by(auto simp: deny_all_fun_def o_def) + +lemma ran_allow_pfun [simp]:"ran(allow_pfun f) = allow `(ran f)" + apply (simp add: allow_pfun_def ran_def) + apply (rule set_eqI) + apply (auto) + apply (case_tac "f a") + apply (auto simp: image_def) + apply (rule_tac x=a in exI) + apply (simp) +done + +lemma ran_allow_all: "ran(A\<^sub>f id) = Allow" + apply (simp add: allow_all_fun_def Allow_def o_def) + apply (rule set_eqI) + apply (auto simp: image_def ran_def) +done + +lemma ran_deny_pfun[simp]: "ran(deny_pfun f) = deny ` (ran f)" + apply (simp add: deny_pfun_def ran_def) + apply (rule set_eqI) + apply (auto) + apply (case_tac "f a") + apply (auto simp: image_def) + apply 
(rule_tac x=a in exI) + apply (simp) +done + +lemma ran_deny_all: "ran(D\<^sub>f id) = Deny" + apply (simp add: deny_all_fun_def Deny_def o_def) + apply (rule set_eqI) + apply (auto simp: image_def ran_def) +done + + +text{* + Reasoning over \verb+dom+ is most crucial since it paves the way for simplification and + reordering of policies composed by override (i.e. by the normal left-to-right rule composition + method. + \begin{itemize} + \item \verb+Map.dom_map_add+ @{thm Map.dom_map_add} + \item \verb+Map.inj_on_map_add_dom+ @{thm Map.inj_on_map_add_dom} + \item \verb+Map.map_add_comm+ @{thm Map.map_add_comm} + \item \verb+Map.map_add_dom_app_simps(1)+ @{thm Map.map_add_dom_app_simps(1)} + \item \verb+Map.map_add_dom_app_simps(2)+ @{thm Map.map_add_dom_app_simps(2)} + \item \verb+Map.map_add_dom_app_simps(3)+ @{thm Map.map_add_dom_app_simps(3)} + \item \verb+Map.map_add_upd_left+ @{thm Map.map_add_upd_left} + \end{itemize} + The latter rule also applies to allow- and deny-override. +*} + +definition dom_restrict :: "['\ set, '\\'\] \ '\\'\" (infixr "\" 55) +where "S \ p \ (\x. if x \ S then p x else \)" + +lemma dom_dom_restrict[simp] : "dom(S \ p) = S \ dom p" + apply (auto simp: dom_restrict_def) + apply (case_tac "x \ S") + apply (simp_all) + apply (case_tac "x \ S") + apply (simp_all) +done + +lemma dom_restrict_idem[simp] : "(dom p) \ p = p" + apply (rule ext) + apply (auto simp: dom_restrict_def + dest: neq_commute[THEN iffD1,THEN not_None_eq [THEN iffD1]]) +done + +lemma dom_restrict_inter[simp] : "T \ S \ p = T \ S \ p" + apply (rule ext) + apply (auto simp: dom_restrict_def + dest: neq_commute[THEN iffD1,THEN not_None_eq [THEN iffD1]]) +done + +definition ran_restrict :: "['\\'\,'\ decision set] \ '\ \'\" (infixr "\" 55) +where "p \ S \ (\x. if p x \ (Some`S) then p x else \)" + +definition ran_restrict2 :: "['\\'\,'\ decision set] \ '\ \'\" (infixr "\2" 55) +where "p \2 S \ (\x. 
if (the (p x)) \ (S) then p x else \)" + +lemma "ran_restrict = ran_restrict2" + apply (rule ext)+ + apply (simp add: ran_restrict_def ran_restrict2_def) + apply (case_tac "x xb") + apply simp_all + apply (metis inj_Some inj_image_mem_iff) +done + + + +lemma ran_ran_restrict[simp] : "ran(p \ S) = S \ ran p" + by(auto simp: ran_restrict_def image_def ran_def) + +lemma ran_restrict_idem[simp] : "p \ (ran p) = p" + apply (rule ext) + apply (auto simp: ran_restrict_def image_def Ball_def ran_def) + apply (erule contrapos_pp) + apply (auto dest!: neq_commute[THEN iffD1,THEN not_None_eq [THEN iffD1]]) +done + +lemma ran_restrict_inter[simp] : "(p \ S) \ T = p \ T \ S" + apply (rule ext) + apply (auto simp: ran_restrict_def + dest: neq_commute[THEN iffD1,THEN not_None_eq [THEN iffD1]]) +done + +lemma ran_gen_A[simp] : "(\Ax. \P x\) \ Allow = (\Ax. \P x\)" + apply (rule ext) + apply (auto simp: Allow_def ran_restrict_def) +done + +lemma ran_gen_D[simp] : "(\Dx. \P x\) \ Deny = (\Dx. \P x\)" + apply (rule ext) + apply (auto simp: Deny_def ran_restrict_def) +done + +lemmas ElementaryPoliciesDefs = deny_pfun_def allow_pfun_def allow_all_fun_def deny_all_fun_def + allow_all_id_def deny_all_id_def allow_all_def deny_all_def + dom_restrict_def ran_restrict_def + +end diff --git a/Monads.thy b/Monads.thy new file mode 100644 index 0000000..c0affba --- /dev/null +++ b/Monads.thy @@ -0,0 +1,576 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * Monads.thy --- a base testing theory for sequential computations. + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2009-2014 Univ. Paris-Sud, France + * 2009-2014 Achim D. Brucker, Germany + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: Monads.thy 10922 2014-11-10 15:41:49Z wolff $ *) + +header {* Basic Monad Theory for Sequential Computations *} +theory + Monads +imports + Main +begin + +section{* General Framework for Monad-based Sequence-Test *} +text{* + As such, Higher-order Logic as a purely functional specification formalism has no built-in + mechanism for state and state-transitions. 
Forms of testing involving state require therefore + explicit mechanisms for their treatment inside the logic; a well-known technique to model + states inside purely functional languages are \emph{monads} made popular by Wadler and Moggi + and extensively used in Haskell. \HOL is powerful enough to represent the most important + standard monads; however, it is not possible to represent monads as such due to well-known + limitations of the Hindley-Milner type-system. + + Here is a variant for state-exception monads, that models precisely transition functions with + preconditions. Next, we declare the state-backtrack-monad. In all of them, our concept of + i/o-stepping functions can be formulated; these are functions mapping input to a given monad. + Later on, we will build the usual concepts of: + \begin{enumerate} + \item deterministic i/o automata, + \item non-deterministic i/o automata, and + \item labelled transition systems (LTS) + \end{enumerate} +*} + +subsection{* State Exception Monads *} +type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" + +definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" +where "bind_SE f g = (\\. case f \ of None \ None + | Some (out, \') \ g out \')" + +notation bind_SE ("bind\<^sub>S\<^sub>E") +syntax (xsymbols) + "_bind_SE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>E,('o','\)MON\<^sub>S\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>E" + ("(2 _ \ _; _)" [5,8,8]8) +translations + "x \ f; g" \ "CONST bind_SE f (% x . g)" + +definition unit_SE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>E" ("(return _)" 8) +where "unit_SE e = (\\. Some(e,\))" +notation unit_SE ("unit\<^sub>S\<^sub>E") + +definition fail\<^sub>S\<^sub>E :: "('o, '\)MON\<^sub>S\<^sub>E" +where "fail\<^sub>S\<^sub>E = (\\. None)" +notation fail\<^sub>S\<^sub>E ("fail\<^sub>S\<^sub>E") + +definition assert_SE :: "('\ \ bool) \ (bool, '\)MON\<^sub>S\<^sub>E" +where "assert_SE P = (\\. 
if P \ then Some(True,\) else None)" +notation assert_SE ("assert\<^sub>S\<^sub>E") + +definition assume_SE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>E" +where "assume_SE P = (\\. if \\ . P \ then Some((), SOME \ . P \) else None)" +notation assume_SE ("assume\<^sub>S\<^sub>E") + +definition if_SE :: "['\ \ bool, ('\, '\)MON\<^sub>S\<^sub>E, ('\, '\)MON\<^sub>S\<^sub>E] \ ('\, '\)MON\<^sub>S\<^sub>E" +where "if_SE c E F = (\\. if c \ then E \ else F \)" +notation if_SE ("if\<^sub>S\<^sub>E") + +text{* + The standard monad theorems about unit and associativity: +*} + +lemma bind_left_unit : "(x \ return a; k) = k" + apply (simp add: unit_SE_def bind_SE_def) +done + +lemma bind_right_unit: "(x \ m; return x) = m" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \") + apply ( simp_all) +done + +lemma bind_assoc: "(y \ (x \ m; k); h) = (x \ m; (y \ k; h))" + apply (simp add: unit_SE_def bind_SE_def) + apply (rule ext) + apply (case_tac "m \", simp_all) + apply (case_tac "a", simp_all) +done + +text{* + In order to express test-sequences also on the object-level and to make our theory amenable to + formal reasoning over test-sequences, we represent them as lists of input and generalize the + bind-operator of the state-exception monad accordingly. The approach is straightforward, but + comes with a price: we have to encapsulate all input and output data into one type. Assume that + we have a typed interface to a module with the operations $op_1$, $op_2$, \ldots, $op_n$ with + the inputs $\iota_1$, $\iota_2$, \ldots, $\iota_n$ (outputs are treated analogously). 
Then we + can encode for this interface the general input - type: + \begin{displaymath} + \texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n + \end{displaymath} + Obviously, we loose some type-safety in this approach; we have to express that in traces only + \emph{corresponding} input and output belonging to the same operation will occur; this form + of side-conditions have to be expressed inside \HOL. From the user perspective, this will not + make much difference, since junk-data resulting from too weak typing can be ruled out by adopted + front-ends. +*} + +text{* + In order to express test-sequences also on the object-level and to make our theory amenable to + formal reasoning over test-sequences, we represent them as lists of input and generalize the + bind-operator of the state-exception monad accordingly. Thus, the notion of test-sequence + is mapped to the notion of a \emph{computation}, a semantic notion; at times we will use + reifications of computations, \ie{} a data-type in order to make computation amenable to + case-splitting and meta-theoretic reasoning. To this end, we have to encapsulate all input + and output data into one type. Assume that we have a typed interface to a module with + the operations $op_1$, $op_2$, \ldots, $op_n$ with the inputs $\iota_1$, $\iota_2$, \ldots, + $\iota_n$ (outputs are treated analogously). + Then we can encode for this interface the general input - type: + \begin{displaymath} + \texttt{datatype}\ \texttt{in}\ =\ op_1\ ::\ \iota_1\ |\ ...\ |\ \iota_n + \end{displaymath} + Obviously, we loose some type-safety in this approach; we have to express + that in traces only \emph{corresponding} input and output belonging to the + same operation will occur; this form of side-conditions have to be expressed + inside \HOL. From the user perspective, this will not make much difference, + since junk-data resulting from too weak typing can be ruled out by adopted + front-ends. 
*} + + +text{* Note that the subsequent notion of a test-sequence allows the io stepping +function (and the special case of a program under test) to stop execution +\emph{within} the sequence; such premature terminations are characterized by an +output list which is shorter than the input list. Note that our primary +notion of multiple execution ignores failure and reports failure +steps only by missing results ... *} + + +fun mbind :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind [] iostep \ = Some([], \)" | + "mbind (a#H) iostep \ = + (case iostep a \ of + None \ Some([], \) + | Some (out, \') \ (case mbind H iostep \' of + None \ Some([out],\') + | Some(outs,\'') \ Some(out#outs,\'')))" + +text{* As mentioned, this definition is fail-safe; in case of an exception, +the current state is maintained, no result is reported. +An alternative is the fail-strict variant @{text "mbind'"} defined below. *} + +lemma mbind_unit [simp]: "mbind [] f = (return [])" + by(rule ext, simp add: unit_SE_def) + +lemma mbind_nofailure [simp]: "mbind S f \ \ None" + apply (rule_tac x=\ in spec) + apply (induct S) + apply (auto simp:unit_SE_def) + apply (case_tac "f a x") + apply ( auto) + apply (erule_tac x="b" in allE) + apply (erule exE) + apply (erule exE) + apply (simp) +done + + +text{* The fail-strict version of @{text mbind'} looks as follows: *} +fun mbind' :: "'\ list \ ('\ \ ('o,'\) MON\<^sub>S\<^sub>E) \ ('o list,'\) MON\<^sub>S\<^sub>E" +where "mbind' [] iostep \ = Some([], \)" | + "mbind' (a#H) iostep \ = + (case iostep a \ of + None \ None + | Some (out, \') \ (case mbind H iostep \' of + None \ None (* fail-strict *) + | Some(outs,\'') \ Some(out#outs,\'')))" + +text{* + mbind' as failure strict operator can be seen as a foldr on bind---if the types would + match \ldots +*} + +definition try_SE :: "('o,'\) MON\<^sub>S\<^sub>E \ ('o option,'\) MON\<^sub>S\<^sub>E" +where "try_SE ioprog = (\\. 
case ioprog \ of + None \ Some(None, \) + | Some(outs, \') \ Some(Some outs, \'))" +text{* In contrast @{term mbind} as a failure safe operator can roughly be seen + as a @{term foldr} on bind - try: + @{text "m1 ; try m2 ; try m3; ..."}. Note, that the rough equivalence only holds for + certain predicates in the sequence - length equivalence modulo None, + for example. However, if a conditional is added, the equivalence + can be made precise: *} + + +lemma mbind_try: + "(x \ mbind (a#S) F; M x) = + (a' \ try_SE(F a); + if a' = None + then (M []) + else (x \ mbind S F; M (the a' # x)))" + apply (rule ext) + apply (simp add: bind_SE_def try_SE_def) + apply (case_tac "F a x") + apply (auto) + apply (simp add: bind_SE_def try_SE_def) + apply (case_tac "mbind S F b") + apply (auto) +done + +text{* On this basis, a symbolic evaluation scheme can be established + that reduces @{term mbind}-code to @{term try_SE}-code and If-cascades. *} + + +definition alt_SE :: "[('o, '\)MON\<^sub>S\<^sub>E, ('o, '\)MON\<^sub>S\<^sub>E] \ ('o, '\)MON\<^sub>S\<^sub>E" (infixl "\\<^sub>S\<^sub>E" 10) +where "(f \\<^sub>S\<^sub>E g) = (\ \. case f \ of None \ g \ + | Some H \ Some H)" + +definition malt_SE :: "('o, '\)MON\<^sub>S\<^sub>E list \ ('o, '\)MON\<^sub>S\<^sub>E" +where "malt_SE S = foldr alt_SE S fail\<^sub>S\<^sub>E" +notation malt_SE ("\\<^sub>S\<^sub>E") + +lemma malt_SE_mt [simp]: "\\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E" +by(simp add: malt_SE_def) + +lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" +by(simp add: malt_SE_def) + +subsection{* State-Backtrack Monads *} +text{*This subsection is still rudimentary and as such an interesting + formal analogue to the previous monad definitions. It is doubtful that it is + interesting for testing and as a computational structure at all. + Clearly more relevant is ``sequence'' instead of ``set,'' which would + rephrase Isabelle's internal tactic concept. 
+*} + + +type_synonym ('o, '\) MON\<^sub>S\<^sub>B = "'\ \ ('o \ '\) set" + +definition bind_SB :: "('o, '\)MON\<^sub>S\<^sub>B \ ('o \ ('o', '\)MON\<^sub>S\<^sub>B) \ ('o', '\)MON\<^sub>S\<^sub>B" +where "bind_SB f g \ = \ ((\(out, \). (g out \)) ` (f \))" +notation bind_SB ("bind\<^sub>S\<^sub>B") + +definition unit_SB :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B" ("(returns _)" 8) +where "unit_SB e = (\\. {(e,\)})" +notation unit_SB ("unit\<^sub>S\<^sub>B") + +syntax (xsymbols) "_bind_SB" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B,('o','\)MON\<^sub>S\<^sub>B] \ ('o','\)MON\<^sub>S\<^sub>B" + ("(2 _ := _; _)" [5,8,8]8) +translations + "x := f; g" \ "CONST bind_SB f (% x . g)" + +lemma bind_left_unit_SB : "(x := returns a; m) = m" + apply (rule ext) + apply (simp add: unit_SB_def bind_SB_def) +done + +lemma bind_right_unit_SB: "(x := m; returns x) = m" + apply (rule ext) + apply (simp add: unit_SB_def bind_SB_def) +done + +lemma bind_assoc_SB: "(y := (x := m; k); h) = (x := m; (y := k; h))" + apply (rule ext) + apply (simp add: unit_SB_def bind_SB_def split_def) +done + +subsection{* State Backtrack Exception Monad *} +text{* + The following combination of the previous two Monad-Constructions allows for the semantic + foundation of a simple generic assertion language in the style of Schirmer's Simpl-Language or + Rustan Leino's Boogie-PL language. The key is to use the exceptional element None for violations + of the assert-statement. +*} +type_synonym ('o, '\) MON\<^sub>S\<^sub>B\<^sub>E = "'\ \ (('o \ '\) set) option" + +definition bind_SBE :: "('o,'\)MON\<^sub>S\<^sub>B\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" +where "bind_SBE f g = (\\. case f \ of None \ None + | Some S \ (let S' = (\(out, \'). 
g out \') ` S + in if None \ S' then None + else Some(\ (the ` S'))))" + +syntax (xsymbols) "_bind_SBE" :: "[pttrn,('o,'\)MON\<^sub>S\<^sub>B\<^sub>E,('o','\)MON\<^sub>S\<^sub>B\<^sub>E] \ ('o','\)MON\<^sub>S\<^sub>B\<^sub>E" + ("(2 _ :\ _; _)" [5,8,8]8) +translations + "x :\ f; g" \ "CONST bind_SBE f (% x . g)" + +definition unit_SBE :: "'o \ ('o, '\)MON\<^sub>S\<^sub>B\<^sub>E" ("(returning _)" 8) +where "unit_SBE e = (\\. Some({(e,\)}))" + +definition assert_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assert_SBE e = (\\. if e \ then Some({((),\)}) + else None)" +notation assert_SBE ("assert\<^sub>S\<^sub>B\<^sub>E") + +definition assume_SBE :: "('\ \ bool) \ (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "assume_SBE e = (\\. if e \ then Some({((),\)}) + else Some {})" +notation assume_SBE ("assume\<^sub>S\<^sub>B\<^sub>E") + +definition havoc_SBE :: " (unit, '\)MON\<^sub>S\<^sub>B\<^sub>E" +where "havoc_SBE = (\\. Some({x. True}))" +notation havoc_SBE ("havoc\<^sub>S\<^sub>B\<^sub>E") + +lemma bind_left_unit_SBE : "(x :\ returning a; m) = m" + apply (rule ext) + apply (simp add: unit_SBE_def bind_SBE_def) +done + +lemma bind_right_unit_SBE: "(x :\ m; returning x) = m" + apply (rule ext) + apply (simp add: unit_SBE_def bind_SBE_def) + apply (case_tac "m x") + apply (simp_all add:Let_def) + apply (rule HOL.ccontr) + apply (simp add: Set.image_iff) +done + +lemmas aux = trans[OF HOL.neq_commute,OF Option.not_None_eq] + +lemma bind_assoc_SBE: "(y :\ (x :\ m; k); h) = (x :\ m; (y :\ k; h))" +proof (rule ext, simp add: unit_SBE_def bind_SBE_def, + case_tac "m x", simp_all add: Let_def Set.image_iff, safe) + case goal1 then show ?case + by(rule_tac x="(a, b)" in bexI, simp_all) +next + case goal2 then show ?case + apply (rule_tac x="(aa, b)" in bexI, simp_all add:split_def) + apply (erule_tac x="(aa,b)" in ballE) + apply (auto simp: aux image_def split_def intro!: rev_bexI) + done +next + case goal3 then show ?case + by(rule_tac x="(a, b)" in 
bexI, simp_all) +next + case goal4 then show ?case + apply (erule_tac Q="None = ?X" in contrapos_pp) + apply (erule_tac x="(aa,b)" and P="\ x. None \ split (\out. k) x" in ballE) + apply (auto simp: aux (*Option.not_None_eq*) image_def split_def intro!: rev_bexI) + done +next + case goal5 then show ?case + apply simp apply ((erule_tac x="(ab,ba)" in ballE)+) + apply (simp_all add: aux (* Option.not_None_eq *), (erule exE)+, simp add:split_def) + apply (erule rev_bexI, case_tac "None\(\p. h(snd p))`y",auto simp:split_def) + done + +next + case goal6 then show ?case + apply simp apply ((erule_tac x="(a,b)" in ballE)+) + apply (simp_all add: aux (* Option.not_None_eq *), (erule exE)+, simp add:split_def) + apply (erule rev_bexI, case_tac "None\(\p. h(snd p))`y",auto simp:split_def) + done +qed + + +section{* Valid Test Sequences in the State Exception Monad *} +text{* + This is still an unstructured merge of executable monad concepts and specification oriented + high-level properties initiating test procedures. +*} + +definition valid_SE :: "'\ \ (bool,'\) MON\<^sub>S\<^sub>E \ bool" (infix "\" 15) +where "(\ \ m) = (m \ \ None \ fst(the (m \)))" +text{* + This notation consideres failures as valid---a definition inspired by I/O conformance. + Note that it is not possible to define this concept once and for all in a Hindley-Milner + type-system. For the moment, we present it only for the state-exception monad, although for + the same definition, this notion is applicable to other monads as well. +*} + +lemma syntax_test : + "\ \ (os \ (mbind \s ioprog); return(length \s = length os))" +oops + + +lemma valid_true[simp]: "(\ \ (s \ return x ; return (P s))) = P x" + by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +text{* Recall mbind\_unit for the base case. 
*} + +lemma valid_failure: "ioprog a \ = None \ + (\ \ (s \ mbind (a#S) ioprog ; M s)) = + (\ \ (M []))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + + + +lemma valid_failure': "A \ = None \ \(\ \ ((s \ A ; M s)))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def) + +lemma valid_successElem: (* atomic boolean Monad "Query Functions" *) + "M \ = Some(f \,\) \ (\ \ M) = f \" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma valid_success: "ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; M s)) = + (\' \ (s \ mbind S ioprog ; M (b#s)))" + apply (simp add: valid_SE_def unit_SE_def bind_SE_def ) + apply (cases "mbind S ioprog \'", auto) +done + +lemma valid_success'': "ioprog a \ = Some(b,\') \ + (\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (\' \ (s \ mbind S ioprog ; return (P (b#s))))" + apply (simp add: valid_SE_def unit_SE_def bind_SE_def ) + apply (cases "mbind S ioprog \'") + apply (simp_all) + apply (auto) +done + +lemma valid_success': "A \ = Some(b,\') \ (\ \ ((s \ A ; M s))) = (\' \ (M b))" +by(simp add: valid_SE_def unit_SE_def bind_SE_def ) + +lemma valid_both: "(\ \ (s \ mbind (a#S) ioprog ; return (P s))) = + (case ioprog a \ of + None \ (\ \ (return (P []))) + | Some(b,\') \ (\' \ (s \ mbind S ioprog ; return (P (b#s)))))" + apply (case_tac "ioprog a \") + apply (simp_all add: valid_failure valid_success'' split: prod.splits) +done + +lemma valid_propagate_1 [simp]: "(\ \ (return P)) = (P)" + by(auto simp: valid_SE_def unit_SE_def) + +lemma valid_propagate_2: "\ \ ((s \ A ; M s)) \ \ v \'. the(A \) = (v,\') \ \' \ (M v)" + apply (auto simp: valid_SE_def unit_SE_def bind_SE_def) + apply (cases "A \") + apply (simp_all) + apply (drule_tac x="A \" and f=the in arg_cong) + apply (simp) + apply (rule_tac x="fst aa" in exI) + apply (rule_tac x="snd aa" in exI) + apply (auto) +done + + +lemma valid_propagate_2': "\ \ ((s \ A ; M s)) \ \ a. 
(A \) = Some a \ (snd a) \ (M (fst a))" + apply (auto simp: valid_SE_def unit_SE_def bind_SE_def) + apply (cases "A \") + apply (simp_all) + apply (simp_all split: prod.splits) + apply (drule_tac x="A \" and f=the in arg_cong) + apply (simp) + apply (rule_tac x="fst aa" in exI) + apply (rule_tac x="snd aa" in exI) + apply (auto) +done + + + +lemma valid_propagate_2'': "\ \ ((s \ A ; M s)) \ \ v \'. A \ = Some(v,\') \ \' \ (M v)" + apply (auto simp: valid_SE_def unit_SE_def bind_SE_def) + apply (cases "A \") + apply (simp_all) + apply (drule_tac x="A \" and f=the in arg_cong) + apply (simp) + apply (rule_tac x="fst aa" in exI) + apply (rule_tac x="snd aa" in exI) + apply (auto) +done + +lemma valid_propoagate_3[simp]: "(\\<^sub>0 \ (\\. Some (f \, \))) = (f \\<^sub>0)" + by(simp add: valid_SE_def ) + +lemma valid_propoagate_3'[simp]: "\(\\<^sub>0 \ (\\. None))" + by(simp add: valid_SE_def ) + +lemma assert_disch1 :" P \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) = (\ \ (M True))" + by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch2 :" \ P \ \ \ (\ \ (x \ assert\<^sub>S\<^sub>E P ; M s))" + by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_disch3 :" \ P \ \ \ (\ \ (assert\<^sub>S\<^sub>E P))" + by(auto simp: bind_SE_def assert_SE_def valid_SE_def) + +lemma assert_D : "(\ \ (x \ assert\<^sub>S\<^sub>E P; M x)) \ P \ \ (\ \ (M True))" + by(auto simp: bind_SE_def assert_SE_def valid_SE_def split: HOL.split_if_asm) + +lemma assume_D : "(\ \ (x \ assume\<^sub>S\<^sub>E P; M x)) \ \ \. 
(P \ \ \ \ (M ()))" + apply (auto simp: bind_SE_def assume_SE_def valid_SE_def split: HOL.split_if_asm) + apply (rule_tac x="Eps P" in exI) + apply (auto) + apply (rule_tac x="True" in exI, rule_tac x="b" in exI) + apply (subst Hilbert_Choice.someI) + apply (assumption) + apply (simp) + apply (subst Hilbert_Choice.someI,assumption) + apply (simp) +done + +text{* + These two rule prove that the SE Monad in connection with the notion of valid sequence is + actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit + sets of states---to be shown below---is strictly speaking not necessary (and will therefore + be discontinued in the development). +*} + +lemma if_SE_D1 : "P \ \ (\ \ if\<^sub>S\<^sub>E P B\<^sub>1 B\<^sub>2) = (\ \ B\<^sub>1)" + by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_D2 : "\ P \ \ (\ \ if\<^sub>S\<^sub>E P B\<^sub>1 B\<^sub>2) = (\ \ B\<^sub>2)" + by(auto simp: if_SE_def valid_SE_def) + +lemma if_SE_split_asm : " (\ \ if\<^sub>S\<^sub>E P B\<^sub>1 B\<^sub>2) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" + by(cases "P \",auto simp: if_SE_D1 if_SE_D2) + +lemma if_SE_split : " (\ \ if\<^sub>S\<^sub>E P B\<^sub>1 B\<^sub>2) = ((P \ \ (\ \ B\<^sub>1)) \ (\ P \ \ (\ \ B\<^sub>2)))" + by(cases "P \", auto simp: if_SE_D1 if_SE_D2) + +lemma [code]: "(\ \ m) = (case (m \) of None \ False | (Some (x,y)) \ x)" + apply (simp add: valid_SE_def) + apply (cases "m \ = None") + apply (simp_all) + apply (insert not_None_eq) + apply (auto) +done + +section{* Valid Test Sequences in the State Exception Backtrack Monad *} +text{* + This is still an unstructured merge of executable monad concepts and specification oriented + high-level properties initiating test procedures. +*} + +definition valid_SBE :: "'\ \ ('a,'\) MON\<^sub>S\<^sub>B\<^sub>E \ bool" (infix "\\<^sub>S\<^sub>B\<^sub>E" 15) +where "\ \\<^sub>S\<^sub>B\<^sub>E m \ (m \ \ None)" +text{* + This notation considers all non-failures as valid. 
+*} + +lemma assume_assert: "(\ \\<^sub>S\<^sub>B\<^sub>E ( _ :\ assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \ \ Q \)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + +lemma assert_intro: "Q \ \ \ \\<^sub>S\<^sub>B\<^sub>E (assert\<^sub>S\<^sub>B\<^sub>E Q)" + by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def) + + +(* legacy : lemmas valid_failure''=valid_failure *) +end diff --git a/Normalisation.thy b/Normalisation.thy new file mode 100644 index 0000000..5452064 --- /dev/null +++ b/Normalisation.thy @@ -0,0 +1,428 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: Normalisation.thy 10879 2014-10-26 11:35:31Z brucker $ *) + +header{* Policy Transformations *} +theory + Normalisation +imports + SeqComposition + ParallelComposition +begin + +text{* + This theory provides the formalisations required for the transformation of UPF policies. + A typical usage scenario can be observed in the firewall case + study~\cite{brucker.ea:formal-fw-testing:2014}. +*} + +section{* Elementary Operators *} +text{* + We start by providing several operators and theorems useful when reasoning about a list of + rules which should eventually be interpreted as combined using the standard override operator. +*} + +text{* + The following definition takes as argument a list of rules and returns a policy where the + rules are combined using the standard override operator. +*} +definition list2policy::"('a \ 'b) list \ ('a \ 'b)" where + "list2policy l = foldr (\ x y. (x \ y)) l \" + +text{* + Determine the position of element of a list. +*} +fun position :: "'\ \ '\ list \ nat" where + "position a [] = 0" + |"(position a (x#xs)) = (if a = x then 1 else (Suc (position a xs)))" + +text{* + Provides the first applied rule of a policy given as a list of rules. 
+*} +fun applied_rule where + "applied_rule C a (x#xs) = (if a \ dom (C x) then (Some x) + else (applied_rule C a xs))" + |"applied_rule C a [] = None" + +text {* + The following is used if the list is constructed backwards. +*} +definition applied_rule_rev where + "applied_rule_rev C a x = applied_rule C a (rev x)" + +text{* + The following is a typical policy transformation. It can be applied to any type of policy and + removes all the rules from a policy with an empty domain. It takes two arguments: a semantic + interpretation function and a list of rules. +*} +fun rm_MT_rules where + "rm_MT_rules C (x#xs) = (if dom (C x)= {} + then rm_MT_rules C xs + else x#(rm_MT_rules C xs))" + |"rm_MT_rules C [] = []" + +text {* + The following invariant establishes that there are no rules with an empty domain in a list + of rules. +*} +fun none_MT_rules where + "none_MT_rules C (x#xs) = (dom (C x) \ {} \ (none_MT_rules C xs))" + |"none_MT_rules C [] = True" + +text{* + The following related invariant establishes that the policy has not a completely empty domain. +*} +fun not_MT where + "not_MT C (x#xs) = (if (dom (C x) = {}) then (not_MT C xs) else True)" + |"not_MT C [] = False" + +text{* +Next, a few theorems about the two invariants and the transformation: +*} +lemma none_MT_rules_vs_notMT: "none_MT_rules C p \ p \ [] \ not_MT C p" + apply (induct p) + apply (simp_all) +done + +lemma rmnMT: "none_MT_rules C (rm_MT_rules C p)" + apply (induct p) + apply (simp_all) +done + +lemma rmnMT2: "none_MT_rules C p \ (rm_MT_rules C p) = p" + apply (induct p) + apply (simp_all) +done + +lemma nMTcharn: "none_MT_rules C p = (\ r \ set p. 
dom (C r) \ {})" + apply (induct p) + apply (simp_all) +done + +lemma nMTeqSet: "set p = set s \ none_MT_rules C p = none_MT_rules C s" + apply (simp add: nMTcharn) +done + +lemma notMTnMT: "\a \ set p; none_MT_rules C p\ \ dom (C a) \ {}" + apply (simp add: nMTcharn) +done + +lemma none_MT_rulesconc: "none_MT_rules C (a@[b]) \ none_MT_rules C a" + apply (induct a) + apply (simp_all) +done + +lemma nMTtail: "none_MT_rules C p \ none_MT_rules C (tl p)" + apply (induct p) + apply (simp_all) +done + +lemma not_MTimpnotMT[simp]: "not_MT C p \ p \ []" + apply (auto) +done + +lemma SR3nMT: "\ not_MT C p \ rm_MT_rules C p = []" + apply (induct p) + apply (auto simp: if_splits) +done + +lemma NMPcharn: "\a \ set p; dom (C a) \ {}\ \ not_MT C p" + apply (induct p) + apply (auto simp: if_splits) +done + +lemma NMPrm: "not_MT C p \ not_MT C (rm_MT_rules C p)" + apply (induct p) + apply (simp_all) +done + +text{* Next, a few theorems about applied\_rule: *} +lemma mrconc: "applied_rule_rev C x p = Some a \ applied_rule_rev C x (b#p) = Some a" +proof (induct p rule: rev_induct) +case Nil show ?case using Nil + by (simp add: applied_rule_rev_def) +next +case (snoc xs x) show ?case using snoc + apply (simp add: applied_rule_rev_def if_splits) + by (metis option.inject) +qed + +lemma mreq_end: "\applied_rule_rev C x b = Some r; applied_rule_rev C x c = Some r\ \ + applied_rule_rev C x (a#b) = applied_rule_rev C x (a#c)" +by (simp add: mrconc) + +lemma mrconcNone: "applied_rule_rev C x p = None \ + applied_rule_rev C x (b#p) = applied_rule_rev C x [b]" +proof (induct p rule: rev_induct) +case Nil show ?case + by (simp add: applied_rule_rev_def) +next +case (snoc ys y) show ?case using snoc + proof (cases "x \ dom (C ys)") + case True show ?thesis using True snoc + by (auto simp: applied_rule_rev_def) + next + case False show ?thesis using False snoc + by (auto simp: applied_rule_rev_def) +qed +qed + +lemma mreq_endNone: "\applied_rule_rev C x b = None; applied_rule_rev C x c = 
None\ \ + applied_rule_rev C x (a#b) = applied_rule_rev C x (a#c)" +by (metis mrconcNone) + +lemma mreq_end2: "applied_rule_rev C x b = applied_rule_rev C x c \ + applied_rule_rev C x (a#b) = applied_rule_rev C x (a#c)" + apply (case_tac "applied_rule_rev C x b = None") + apply (auto intro: mreq_end mreq_endNone) +done + +lemma mreq_end3: "applied_rule_rev C x p \ None \ + applied_rule_rev C x (b # p) = applied_rule_rev C x (p)" +by (auto simp: mrconc) + +lemma mrNoneMT: "\r \ set p; applied_rule_rev C x p = None\ \ + x \ dom (C r)" +proof (induct p rule: rev_induct) + case Nil show ?case using Nil + by (simp add: applied_rule_rev_def) + next + case (snoc y ys) show ?case using snoc + proof (cases "r \ set ys") + case True show ?thesis using snoc True + by (simp add: applied_rule_rev_def split: split_if_asm) + next + case False show ?thesis using snoc False + by (simp add: applied_rule_rev_def split: split_if_asm) + qed +qed + + +section{* Distributivity of the Transformation. *} +text{* + The scenario is the following (can be applied iteratively): + \begin{itemize} + \item Two policies are combined using one of the parallel combinators + \item (e.g. P = P1 P2) (At least) one of the constituent policies has + \item a normalisation procedures, which as output produces a list of + \item policies that are semantically equivalent to the original policy if + \item combined from left to right using the override operator. + \end{itemize} +*} + +text{* + The following function is crucial for the distribution. Its arguments are a policy, a list + of policies, a parallel combinator, and a range and a domain coercion function. 
+*} +fun prod_list :: "('\ \'\) \ (('\ \'\) list) \ + (('\ \'\) \ ('\ \'\) \ (('\ \ '\) \ ('\ \ '\))) \ + (('\ \ '\) \ 'y) \ ('x \ ('\ \ '\)) \ + (('x \ 'y) list)" (infixr "\\<^sub>L" 54) where + "prod_list x (y#ys) par_comb ran_adapt dom_adapt = + ((ran_adapt o_f ((par_comb x y) o dom_adapt))#(prod_list x ys par_comb ran_adapt dom_adapt))" +| "prod_list x [] par_comb ran_adapt dom_adapt = []" + +text{* + An instance, as usual there are four of them. +*} + +definition prod_2_list :: "[('\ \'\), (('\ \'\) list)] \ + (('\ \ '\) \ 'y) \ ('x \ ('\ \ '\)) \ + (('x \ 'y) list)" (infixr "\\<^sub>2\<^sub>L" 55) where + "x \\<^sub>2\<^sub>L y = (\ d r. (x \\<^sub>L y) (op \\<^sub>2) d r)" + +lemma list2listNMT: "x \ [] \ map sem x \ []" + apply (case_tac x) + apply (simp_all) +done + +lemma two_conc: "(prod_list x (y#ys) p r d) = ((r o_f ((p x y) o d))#(prod_list x ys p r d))" +by simp + +text{* + The following two invariants establish if the law of distributivity holds for a combinator + and if an operator is strict regarding undefinedness. +*} +definition is_distr where + "is_distr p = (\ g f. (\ N P1 P2. ((g o_f ((p N (P1 \ P2)) o f)) = + ((g o_f ((p N P1) o f)) \ (g o_f ((p N P2) o f))))))" + +definition is_strict where + "is_strict p = (\ r d. \ P1. 
(r o_f (p P1 \ \ d)) = \)" + +lemma is_distr_orD: "is_distr (op \\<^sub>\\<^sub>D) d r" + apply (simp add: is_distr_def) + apply (rule allI)+ + apply (rule distr_orD) + apply (simp) +done + +lemma is_strict_orD: "is_strict (op \\<^sub>\\<^sub>D) d r" + apply (simp add: is_strict_def) + apply (simp add: policy_range_comp_def) +done + +lemma is_distr_2: "is_distr (op \\<^sub>2) d r" + apply (simp add: is_distr_def) + apply (rule allI)+ + apply (rule distr_or2) +by simp + +lemma is_strict_2: "is_strict (op \\<^sub>2) d r" + apply (simp only: is_strict_def) + apply simp + apply (simp add: policy_range_comp_def) +done + +lemma domStart: "t \ dom p1 \ (p1 \ p2) t = p1 t" + apply (simp add: map_add_dom_app_simps) +done + +lemma notDom: "x \ dom A \ \ A x = None" + apply auto +done + +text{* + The following theorems are crucial: they establish the correctness of the distribution. +*} +lemma Norm_Distr_1: "((r o_f (((op \\<^sub>1) P1 (list2policy P2)) o d)) x = + ((list2policy ((P1 \\<^sub>L P2) (op \\<^sub>1) r d)) x))" +proof (induct P2) +case Nil show ?case + by (simp add: policy_range_comp_def list2policy_def) +next +case (Cons p ps) show ?case using Cons + proof (cases "x \ dom (r o_f ((P1 \\<^sub>1 p) \ d))") + case True show ?thesis using True + by (auto simp: list2policy_def policy_range_comp_def prod_1_def + split: option.splits decision.splits prod.splits) + next + case False show ?thesis using Cons False + by (auto simp: list2policy_def policy_range_comp_def map_add_dom_app_simps(3) prod_1_def + split: option.splits decision.splits prod.splits) + qed +qed + +lemma Norm_Distr_2: "((r o_f (((op \\<^sub>2) P1 (list2policy P2)) o d)) x = + ((list2policy ((P1 \\<^sub>L P2) (op \\<^sub>2) r d)) x))"proof (induct P2) +case Nil show ?case + by (simp add: policy_range_comp_def list2policy_def) +next +case (Cons p ps) show ?case using Cons + proof (cases "x \ dom (r o_f ((P1 \\<^sub>2 p) \ d))") + case True show ?thesis using True + by (auto simp: list2policy_def 
prod_2_def policy_range_comp_def + split: option.splits decision.splits prod.splits) + next + case False show ?thesis using Cons False + by (auto simp: policy_range_comp_def list2policy_def map_add_dom_app_simps(3) prod_2_def + split: option.splits decision.splits prod.splits) + qed +qed + +lemma Norm_Distr_A: "((r o_f (((op \\<^sub>\\<^sub>A) P1 (list2policy P2)) o d)) x = + ((list2policy ((P1 \\<^sub>L P2) (op \\<^sub>\\<^sub>A) r d)) x))" +proof (induct P2) +case Nil show ?case + by (simp add: policy_range_comp_def list2policy_def) +next +case (Cons p ps) show ?case using Cons + proof (cases "x \ dom (r o_f ((P1 \\<^sub>\\<^sub>A p) \ d))") + case True show ?thesis using True + by (auto simp: policy_range_comp_def list2policy_def prod_orA_def + split: option.splits decision.splits prod.splits) + next + case False show ?thesis using Cons False + by (auto simp: policy_range_comp_def list2policy_def map_add_dom_app_simps(3) prod_orA_def + split: option.splits decision.splits prod.splits) + qed +qed + + +lemma Norm_Distr_D: "((r o_f (((op \\<^sub>\\<^sub>D) P1 (list2policy P2)) o d)) x = + ((list2policy ((P1 \\<^sub>L P2) (op \\<^sub>\\<^sub>D) r d)) x))" +proof (induct P2) +case Nil show ?case + by (simp add: policy_range_comp_def list2policy_def) +next +case (Cons p ps) show ?case using Cons + proof (cases "x \ dom (r o_f ((P1 \\<^sub>\\<^sub>D p) \ d))") + case True show ?thesis using True + by (auto simp: policy_range_comp_def list2policy_def prod_orD_def + split: option.splits decision.splits prod.splits) + next + case False show ?thesis using Cons False + by (auto simp: policy_range_comp_def list2policy_def map_add_dom_app_simps(3) prod_orD_def + split: option.splits decision.splits prod.splits) + qed +qed + +text {* Some domain reasoning *} +lemma domSubsetDistr1: "dom A = UNIV \ dom ((\(x, y). x) o_f (A \\<^sub>1 B) o (\ x. 
(x,x))) = dom B" + apply (rule set_eqI) + apply (rule iffI) + apply (auto simp: prod_1_def policy_range_comp_def dom_def + split: decision.splits option.splits prod.splits) +done + +lemma domSubsetDistr2: "dom A = UNIV \ dom ((\(x, y). x) o_f (A \\<^sub>2 B) o (\ x. (x,x))) = dom B" + apply (rule set_eqI) + apply (rule iffI) + apply (auto simp: prod_2_def policy_range_comp_def dom_def + split: decision.splits option.splits prod.splits) +done + +lemma domSubsetDistrA: "dom A = UNIV \ dom ((\(x, y). x) o_f (A \\<^sub>\\<^sub>A B) o (\ x. (x,x))) = dom B" + apply (rule set_eqI) + apply (rule iffI) + apply (auto simp: prod_orA_def policy_range_comp_def dom_def + split: decision.splits option.splits prod.splits) +done + +lemma domSubsetDistrD: "dom A = UNIV \ dom ((\(x, y). x) o_f (A \\<^sub>\\<^sub>D B) o (\ x. (x,x))) = dom B" + apply (rule set_eqI) + apply (rule iffI) + apply (auto simp: prod_orD_def policy_range_comp_def dom_def + split: decision.splits option.splits prod.splits) +done +end + + diff --git a/NormalisationTestSpecification.thy b/NormalisationTestSpecification.thy new file mode 100644 index 0000000..1951bc3 --- /dev/null +++ b/NormalisationTestSpecification.thy @@ -0,0 +1,157 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: NormalisationTestSpecification.thy 10879 2014-10-26 11:35:31Z brucker $ *) + +header {* Policy Transformation for Testing *} +theory + NormalisationTestSpecification +imports + Normalisation +begin + +text{* + This theory provides functions and theorems which are useful if one wants to test policy + which are transformed. Most exist in two versions: one where the domains of the rules + of the list (which is the result of a transformation) are pairwise disjoint, and one where + this applies not for the last rule in a list (which is usually a default rules). + + The examples in the firewall case study provide a good documentation how these theories can + be applied. 
+*} + +text{* + This invariant establishes that the domains of a list of rules are pairwise disjoint. +*} +fun disjDom where + "disjDom (x#xs) = ((\y\(set xs). dom x \ dom y = {}) \ disjDom xs)" +|"disjDom [] = True" + +fun PUTList :: "('a \ 'b) \ 'a \ ('a \ 'b) list \ bool" + where + "PUTList PUT x (p#ps) = ((x \ dom p \ (PUT x = p x)) \ (PUTList PUT x ps))" +|"PUTList PUT x [] = True" + +lemma distrPUTL1: "x \ dom P \ (list2policy PL) x = P x + \ (PUTList PUT x PL \ (PUT x = P x))" + apply (induct PL) + apply (auto simp: list2policy_def dom_def) +done + +lemma PUTList_None: "x \ dom (list2policy list) \ PUTList PUT x list" + apply (induct list) + apply (auto simp: list2policy_def dom_def) +done + +lemma PUTList_DomMT: + "(\y\set list. dom a \ dom y = {}) \ x \ (dom a) \ x \ dom (list2policy list)" + apply (induct list) + apply (auto simp: dom_def list2policy_def) +done + +lemma distrPUTL2: + "x \ dom P \ (list2policy PL) x = P x \ disjDom PL \ (PUT x = P x) \ PUTList PUT x PL " + apply (induct PL) + apply (simp_all add: list2policy_def) + apply (auto) + apply (case_tac "x \ dom a") + apply (case_tac "list2policy PL x = P x") + apply (simp add: list2policy_def) + apply (rule PUTList_None) + apply (rule_tac a = a in PUTList_DomMT) + apply (simp_all add: list2policy_def dom_def) +done + +lemma distrPUTL: + "\x \ dom P; (list2policy PL) x = P x; disjDom PL\ \ (PUT x = P x) = PUTList PUT x PL " + apply (rule iffI) + apply (rule distrPUTL2) + apply (simp_all) + apply (rule_tac PL = PL in distrPUTL1) + apply (auto) +done + +text{* + It makes sense to cater for the common special case where the normalisation returns a list + where the last element is a default-catch-all rule. It seems easier to cater for this globally, + rather than to require the normalisation procedures to do this. 
+*} + +fun gatherDomain_aux where + "gatherDomain_aux (x#xs) = (dom x \ (gatherDomain_aux xs))" +|"gatherDomain_aux [] = {}" + +definition gatherDomain where "gatherDomain p = (gatherDomain_aux (butlast p))" + +definition PUTListGD where "PUTListGD PUT x p = + (((x \ (gatherDomain p) \ x \ dom (last p)) \ PUT x = (last p) x) \ + (PUTList PUT x (butlast p)))" + + +definition disjDomGD where "disjDomGD p = disjDom (butlast p)" + +lemma distrPUTLG1: "\x \ dom P; (list2policy PL) x = P x; PUTListGD PUT x PL\ \ PUT x = P x" + apply (induct PL) + apply (simp_all add: domIff PUTListGD_def disjDomGD_def gatherDomain_def list2policy_def) + apply (auto simp: dom_def domIff split: split_if_asm) +done + +lemma distrPUTLG2: + "PL \ [] \ x \ dom P \ (list2policy (PL)) x = P x \ disjDomGD PL \ + (PUT x = P x) \ PUTListGD PUT x (PL)" + apply (simp add: PUTListGD_def disjDomGD_def gatherDomain_def list2policy_def) + apply (induct PL) + apply (auto) + apply (metis PUTList_DomMT PUTList_None domI) +done + +lemma distrPUTLG: + "\x \ dom P; (list2policy PL) x = P x; disjDomGD PL; PL \ []\ \ + (PUT x = P x) = PUTListGD PUT x PL " + apply (rule iffI) + apply (rule distrPUTLG2) + apply (simp_all) + apply (rule_tac PL = PL in distrPUTLG1) + apply (auto) +done + +end + + diff --git a/ParallelComposition.thy b/ParallelComposition.thy new file mode 100644 index 0000000..f8f35c0 --- /dev/null +++ b/ParallelComposition.thy @@ -0,0 +1,330 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: ParallelComposition.thy 10879 2014-10-26 11:35:31Z brucker $ *) + +header{* Parallel Composition*} +theory + ParallelComposition +imports + ElementaryPolicies +begin + +text{* + The following combinators are based on the idea that two policies are executed in parallel. + Since both input and the output can differ, we chose to pair them. 
+ + The new input pair will often contain repetitions, which can be reduced using the + domain-restriction and domain-reduction operators. Using additional range-modifying operators + such as $\nabla$, decide which result argument is chosen; this might be the first or the latter + or, in case that $\beta = \gamma$, and $\beta$ underlies a lattice structure, the supremum or + infimum of both, or, an arbitrary combination of them. + + In any case, although we have strictly speaking a pairing of decisions and not a nesting of + them, we will apply the same notational conventions as for the latter, i.e. as for + flattening. +*} + +section{* Parallel Combinators: Foundations *} +text {* + There are four possible semantics how the decision can be combined, thus there are four + parallel composition operators. For each of them, we prove several properties. +*} + +definition prod_orA ::"['\\'\, '\ \'\] \ ('\\'\ \ '\\'\)" (infixr "\\<^sub>\\<^sub>A" 55) +where "p1 \\<^sub>\\<^sub>A p2 = + (\(x,y). (case p1 x of + \allow d1\ \(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \allow(d1,d2)\ + | \ \ \) + | \deny d1\\(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \deny (d1,d2)\ + | \ \ \) + | \ \ \))" + +lemma prod_orA_mt[simp]:"p \\<^sub>\\<^sub>A \ = \" + apply (rule ext) + apply (simp add: prod_orA_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +lemma mt_prod_orA[simp]:"\ \\<^sub>\\<^sub>A p = \" + apply (rule ext) + apply (simp add: prod_orA_def) +done + +lemma prod_orA_quasi_commute: "p2 \\<^sub>\\<^sub>A p1 = (((\(x,y). (y,x)) o_f (p1 \\<^sub>\\<^sub>A p2))) o (\(a,b).(b,a))" + apply (rule ext) + apply (simp add: prod_orA_def policy_range_comp_def o_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +definition prod_orD ::"['\ \ '\, '\ \ '\] \ ('\ \ '\ \ '\ \ '\ )" (infixr "\\<^sub>\\<^sub>D" 55) +where "p1 \\<^sub>\\<^sub>D p2 = + (\(x,y). 
(case p1 x of + \allow d1\ \(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \deny(d1,d2)\ + | \ \ \) + | \deny d1\\(case p2 y of + \allow d2\ \ \deny(d1,d2)\ + | \deny d2\ \ \deny (d1,d2)\ + | \ \ \) + | \ \ \))" + +lemma prod_orD_mt[simp]:"p \\<^sub>\\<^sub>D \ = \" + apply (rule ext) + apply (simp add: prod_orD_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +lemma mt_prod_orD[simp]:"\ \\<^sub>\\<^sub>D p = \" + apply (rule ext) + apply (simp add: prod_orD_def) +done + +lemma prod_orD_quasi_commute: "p2 \\<^sub>\\<^sub>D p1 = (((\(x,y). (y,x)) o_f (p1 \\<^sub>\\<^sub>D p2))) o (\(a,b).(b,a))" + apply (rule ext) + apply (simp add: prod_orD_def policy_range_comp_def o_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +text{* + The following two combinators are by definition non-commutative, but still strict. +*} + +definition prod_1 :: "['\\'\, '\ \'\] \ ('\\'\ \ '\\'\)" (infixr "\\<^sub>1" 55) +where "p1 \\<^sub>1 p2 \ + (\(x,y). (case p1 x of + \allow d1\\(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \allow(d1,d2)\ + | \ \ \) + | \deny d1\ \(case p2 y of + \allow d2\ \ \deny(d1,d2)\ + | \deny d2\ \ \deny(d1,d2)\ + | \ \ \) + |\ \ \))" + +lemma prod_1_mt[simp]:"p \\<^sub>1 \ = \" + apply (rule ext) + apply (simp add: prod_1_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +lemma mt_prod_1[simp]:"\ \\<^sub>1 p = \" + apply (rule ext) + apply (simp add: prod_1_def) +done + +definition prod_2 :: "['\\'\, '\ \'\] \ ('\\'\ \ '\\'\)" (infixr "\\<^sub>2" 55) +where "p1 \\<^sub>2 p2 \ + (\(x,y). 
(case p1 x of + \allow d1\ \(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \deny (d1,d2)\ + | \ \ \) + | \deny d1\\(case p2 y of + \allow d2\ \ \allow(d1,d2)\ + | \deny d2\ \ \deny (d1,d2)\ + | \ \ \) + |\ \\))" + +lemma prod_2_mt[simp]:"p \\<^sub>2 \ = \" + apply (rule ext) + apply (simp add: prod_2_def) + apply (auto) + apply (simp split: option.splits decision.splits) +done + +lemma mt_prod_2[simp]:"\ \\<^sub>2 p = \" + apply (rule ext) + apply (simp add: prod_2_def) +done + +definition prod_1_id ::"['\\'\, '\\'\] \ ('\ \ '\\'\)" (infixr "\\<^sub>1\<^sub>I" 55) +where "p \\<^sub>1\<^sub>I q = (p \\<^sub>1 q) o (\x. (x,x))" + +lemma prod_1_id_mt[simp]:"p \\<^sub>1\<^sub>I \ = \" + apply (rule ext) + apply (simp add: prod_1_id_def) +done + +lemma mt_prod_1_id[simp]:"\ \\<^sub>1\<^sub>I p = \" + apply (rule ext) + apply (simp add: prod_1_id_def prod_1_def) +done + +definition prod_2_id ::"['\\'\, '\\'\] \ ('\ \ '\\'\)" (infixr "\\<^sub>2\<^sub>I" 55) +where"p \\<^sub>2\<^sub>I q = (p \\<^sub>2 q) o (\x. (x,x))" + +lemma prod_2_id_mt[simp]:"p \\<^sub>2\<^sub>I \ = \" + apply (rule ext) + apply (simp add: prod_2_id_def) +done + +lemma mt_prod_2_id[simp]:"\ \\<^sub>2\<^sub>I p = \" + apply (rule ext) + apply (simp add: prod_2_id_def prod_2_def) +done + +section{* Combinators for Transition Policies *} +text {* + For constructing transition policies, two additional combinators are required: one combines + state transitions by pairing the states, the other works equivalently on general maps. +*} + +definition parallel_map :: "('\ \ '\) \ ('\ \ '\) \ + ('\ \ '\ \ '\ \ '\)" (infixr "\\<^sub>M" 60) +where "p1 \\<^sub>M p2 = (\ (x,y). case p1 x of \d1\ \ + (case p2 y of \d2\ \ \(d1,d2)\ + | \ \ \) + | \ \ \)" + +definition parallel_st :: "('i \ '\ \ '\) \ ('i \ '\' \ '\') \ + ('i \ '\ \ '\' \ '\ \ '\')" (infixr "\\<^sub>S" 60) +where + "p1 \\<^sub>S p2 = (p1 \\<^sub>M p2) o (\ (a,b,c). 
((a,b),a,c))" + + +section{* Range Splitting *} +text{* + The following combinator is a special case of both a parallel composition operator and a + range splitting operator. Its primary use case is when combining a policy with state transitions. +*} + +definition comp_ran_split :: "[('\ \ '\) \ ('\ \'\), 'd \ '\] \ ('d \ '\) \ ('\ \ '\)" + (infixr "\\<^sub>\" 100) +where "P \\<^sub>\ p \ \x. case p (fst x) of + \allow y\ \ (case ((fst P) (snd x)) of \ \ \ | \z\ \ \allow (y,z)\) + | \deny y\ \ (case ((snd P) (snd x)) of \ \ \ | \z\ \ \deny (y,z)\) + | \ \ \" + +text{* An alternative characterisation of the operator is as follows: *} +lemma comp_ran_split_charn: + "(f, g) \\<^sub>\ p = ( +(((p \ Allow)\\<^sub>\\<^sub>A (A\<^sub>p f)) \ + ((p \ Deny) \\<^sub>\\<^sub>A (D\<^sub>p g))))" + apply (rule ext) + apply (simp add: comp_ran_split_def map_add_def o_def ran_restrict_def image_def + Allow_def Deny_def dom_restrict_def prod_orA_def + allow_pfun_def deny_pfun_def + split:option.splits decision.splits) + apply (auto) +done + +section {* Distributivity of the parallel combinators *} + +lemma distr_or1_a: "(F = F1 \ F2) \ (((N \\<^sub>1 F) o f) = + (((N \\<^sub>1 F1) o f) \ ((N \\<^sub>1 F2) o f))) " + apply (rule ext) + apply (simp add: prod_1_def map_add_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: prod_1_def map_add_def + split: decision.splits option.splits) +done + +lemma distr_or1: "(F = F1 \ F2) \ ((g o_f ((N \\<^sub>1 F) o f)) = + ((g o_f ((N \\<^sub>1 F1) o f)) \ (g o_f ((N \\<^sub>1 F2) o f)))) " + apply (rule ext)+ + apply (simp add: prod_1_def map_add_def policy_range_comp_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: prod_1_def map_add_def + split: decision.splits option.splits) +done + +lemma distr_or2_a: "(F = F1 \ F2) \ (((N \\<^sub>2 F) o f) = + (((N \\<^sub>2 F1) o f) \ ((N \\<^sub>2 F2) o f))) " + apply (rule ext) + apply (simp add: prod_2_id_def prod_2_def 
map_add_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: prod_2_def map_add_def + split: decision.splits option.splits) +done + +lemma distr_or2: "(F = F1 \ F2) \ ((r o_f ((N \\<^sub>2 F) o f)) = + ((r o_f ((N \\<^sub>2 F1) o f)) \ (r o_f ((N \\<^sub>2 F2) o f)))) " + apply (rule ext) + apply (simp add: prod_2_id_def prod_2_def map_add_def policy_range_comp_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: prod_2_def map_add_def + split: decision.splits option.splits) +done + +lemma distr_orA: "(F = F1 \ F2) \ ((g o_f ((N \\<^sub>\\<^sub>A F) o f)) = + ((g o_f ((N \\<^sub>\\<^sub>A F1) o f)) \ (g o_f ((N \\<^sub>\\<^sub>A F2) o f)))) " + apply (rule ext)+ + apply (simp add: prod_orA_def map_add_def policy_range_comp_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: map_add_def + split: decision.splits option.splits) +done + +lemma distr_orD: "(F = F1 \ F2) \ ((g o_f ((N \\<^sub>\\<^sub>D F) o f)) = + ((g o_f ((N \\<^sub>\\<^sub>D F1) o f)) \ (g o_f ((N \\<^sub>\\<^sub>D F2) o f)))) " + apply (rule ext)+ + apply (simp add: prod_orD_def map_add_def policy_range_comp_def + split: decision.splits option.splits) + apply (case_tac "f x") + apply (simp_all add: map_add_def + split: decision.splits option.splits) +done + +lemma coerc_assoc: "(r o_f P) o d = r o_f (P o d)" + apply (simp add: policy_range_comp_def) + apply (rule ext) + apply (simp split: option.splits decision.splits) +done + +lemmas ParallelDefs = prod_orA_def prod_orD_def prod_1_def prod_2_def parallel_map_def + parallel_st_def comp_ran_split_def +end diff --git a/ROOT b/ROOT new file mode 100644 index 0000000..c418faa --- /dev/null +++ b/ROOT @@ -0,0 +1,15 @@ +chapter AFP + +session "UPF" (AFP) = HOL + + description {* The Unified Policy Framework (UPF) *} + options [timeout=300] + theories + Monads + UPF + ServiceExample + document_files + "root.tex" + "root.bib" + 
"introduction.tex" + "conclusion.tex" + "example-intro.tex" diff --git a/SeqComposition.thy b/SeqComposition.thy new file mode 100644 index 0000000..0410984 --- /dev/null +++ b/SeqComposition.thy @@ -0,0 +1,236 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: SeqComposition.thy 10879 2014-10-26 11:35:31Z brucker $ *) + +header{* Sequential Composition *} +theory + SeqComposition +imports + ElementaryPolicies +begin + +text{* + Sequential composition is based on the idea that two policies are to be combined by applying + the second policy to the output of the first one. Again, there are four possibilities how the + decisions can be combined. *} + +section {* Flattening *} +text{* + A key concept of sequential policy composition is the flattening of nested decisions. There are + four possibilities, and these possibilities will give the various flavours of policy composition. 
+*} +fun flat_orA :: "('\ decision) decision \ ('\ decision)" +where "flat_orA(allow(allow y)) = allow y" + |"flat_orA(allow(deny y)) = allow y" + |"flat_orA(deny(allow y)) = allow y" + |"flat_orA(deny(deny y)) = deny y" + +lemma flat_orA_deny[dest]:"flat_orA x = deny y \ x = deny(deny y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +lemma flat_orA_allow[dest]: "flat_orA x = allow y \ x = allow(allow y) + \ x = allow(deny y) + \ x = deny(allow y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +fun flat_orD :: "('\ decision) decision \ ('\ decision)" +where "flat_orD(allow(allow y)) = allow y" + |"flat_orD(allow(deny y)) = deny y" + |"flat_orD(deny(allow y)) = deny y" + |"flat_orD(deny(deny y)) = deny y" + +lemma flat_orD_allow[dest]: "flat_orD x = allow y \ x = allow(allow y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +lemma flat_orD_deny[dest]: "flat_orD x = deny y \ x = deny(deny y) + \ x = allow(deny y) + \ x = deny(allow y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +fun flat_1 :: "('\ decision) decision \ ('\ decision)" +where "flat_1(allow(allow y)) = allow y" + |"flat_1(allow(deny y)) = allow y" + |"flat_1(deny(allow y)) = deny y" + |"flat_1(deny(deny y)) = deny y" + +lemma flat_1_allow[dest]: "flat_1 x = allow y \ x = allow(allow y) \ x = allow(deny y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +lemma flat_1_deny[dest]: "flat_1 x = deny y \ x = deny(deny y) \ x = deny(allow y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + 
+fun flat_2 :: "('\ decision) decision \ ('\ decision)" +where "flat_2(allow(allow y)) = allow y" + |"flat_2(allow(deny y)) = deny y" + |"flat_2(deny(allow y)) = allow y" + |"flat_2(deny(deny y)) = deny y" + +lemma flat_2_allow[dest]: "flat_2 x = allow y \ x = allow(allow y) \ x = deny(allow y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +lemma flat_2_deny[dest]: "flat_2 x = deny y \ x = deny(deny y) \ x = allow(deny y)" + apply (case_tac "x") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) + apply (case_tac "\") + apply (simp_all) +done + +section{* Policy Composition *} +text{* + The following definition allows to compose two policies. Denies and allows are transferred. +*} + +fun lift :: "('\ \ '\) \ ('\ decision \'\ decision)" +where "lift f (deny s) = (case f s of + \y\ \ \deny y\ + | \ \ \)" + | "lift f (allow s) = (case f s of + \y\ \ \allow y\ + | \ \ \)" + +lemma lift_mt [simp]: "lift \ = \" + apply (rule ext) + apply (case_tac "x") + apply (simp_all) +done + + +text{* + Since policies are maps, we inherit a composition on them. However, this results in nestings + of decisions---which must be flattened. 
As we now that there are four different forms of + flattening, we have four different forms of policy composition: *} +definition + comp_orA :: "['\\'\, '\\'\] \ '\\'\" (infixl "o'_orA" 55) where + "p2 o_orA p1 \ (map_option flat_orA) o (lift p2 o_m p1)" + +notation (xsymbols) + comp_orA (infixl "\\<^sub>\\<^sub>A" 55) + +lemma comp_orA_mt[simp]:"p \\<^sub>\\<^sub>A \ = \" + by(simp add: comp_orA_def) + +lemma mt_comp_orA[simp]:"\ \\<^sub>\\<^sub>A p = \" + by(simp add: comp_orA_def) + +definition + comp_orD :: "['\\'\, '\\'\] \ '\\'\" (infixl "o'_orD" 55) where + "p2 o_orD p1 \ (map_option flat_orD) o (lift p2 o_m p1)" + +notation (xsymbols) + comp_orD (infixl "\\<^sub>orD" 55) + +lemma comp_orD_mt[simp]:"p o_orD \ = \" + by(simp add: comp_orD_def) + +lemma mt_comp_orD[simp]:"\ o_orD p = \" + by(simp add: comp_orD_def) + +definition + comp_1 :: "['\\'\, '\\'\] \ '\\'\" (infixl "o'_1" 55) where + "p2 o_1 p1 \ (map_option flat_1) o (lift p2 o_m p1)" + +notation (xsymbols) + comp_1 (infixl "\\<^sub>1" 55) + +lemma comp_1_mt[simp]:"p \\<^sub>1 \ = \" + by(simp add: comp_1_def) + +lemma mt_comp_1[simp]:"\ \\<^sub>1 p = \" + by(simp add: comp_1_def) + +definition + comp_2 :: "['\\'\, '\\'\] \ '\\'\" (infixl "o'_2" 55) where + "p2 o_2 p1 \ (map_option flat_2) o (lift p2 o_m p1)" + +notation (xsymbols) + comp_2 (infixl "\\<^sub>2" 55) + +lemma comp_2_mt[simp]:"p \\<^sub>2 \ = \" + by(simp add: comp_2_def) + +lemma mt_comp_2[simp]:"\ \\<^sub>2 p = \" + by(simp add: comp_2_def) + +end diff --git a/Service.thy b/Service.thy new file mode 100644 index 0000000..34ff7ab --- /dev/null +++ b/Service.thy @@ -0,0 +1,478 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2014 Achim D. 
Brucker, Germany + * 2010-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id: Service.thy 10945 2014-11-21 12:50:43Z wolff $ *) + +header {* Secure Service Specification *} +theory + Service +imports + UPF +begin +text {* + In this section, we model a simple Web service and its access control model + that allows the staff in a hospital to access health care records of patients. +*} + +section{* Datatypes for Modelling Users and Roles*} +subsection {* Users *} +text{* + First, we introduce a type for users that we use to model that each + staff member has a unique id: +*} +type_synonym user = int (* Each NHS employee has a unique NHS_ID. *) + +text {* + Similarly, each patient has a unique id: +*} +type_synonym patient = int (* Each patient gets a unique id *) + +subsection {* Roles and Relationships*} +text{* In our example, we assume three different roles for members of the clinical staff: *} + +datatype role = ClinicalPractitioner | Nurse | Clerical + +text{* + We model treatment relationships (legitimate relationships) between staff and patients + (respectively, their health records. This access control model is inspired by our detailed + NHS model. +*} +type_synonym lr_id = int +type_synonym LR = "lr_id \ (user set)" + +text{* The security context stores all the existing LRs. *} +type_synonym \ = "patient \ LR" + +text{* The user context stores the roles the users are in. 
*} +type_synonym \ = "user \ role" + +section {* Modelling Health Records and the Web Service API*} +subsection {* Health Records *} +text {* The content and the status of the entries of a health record *} +datatype data = dummyContent +datatype status = Open | Closed +type_synonym entry_id = int +type_synonym entry = "status \ user \ data" +type_synonym SCR = "(entry_id \ entry)" +type_synonym DB = "patient \ SCR" + +subsection {* The Web Service API *} +text{* The operations provided by the service: *} +datatype Operation = createSCR user role patient + | appendEntry user role patient entry_id entry + | deleteEntry user role patient entry_id + | readEntry user role patient entry_id + | readSCR user role patient + | addLR user role patient lr_id "(user set)" + | removeLR user role patient lr_id + | changeStatus user role patient entry_id status + | deleteSCR user role patient + | editEntry user role patient entry_id entry + +fun is_createSCR where + "is_createSCR (createSCR u r p) = True" +|"is_createSCR x = False" + +fun is_appendEntry where + "is_appendEntry (appendEntry u r p e ei) = True" + |"is_appendEntry x = False" + +fun is_deleteEntry where + "is_deleteEntry (deleteEntry u r p e_id) = True" + |"is_deleteEntry x = False" + +fun is_readEntry where + "is_readEntry (readEntry u r p e) = True" + |"is_readEntry x = False" + +fun is_readSCR where + "is_readSCR (readSCR u r p) = True" + |"is_readSCR x = False" + +fun is_changeStatus where + "is_changeStatus (changeStatus u r p s ei) = True" + |"is_changeStatus x = False" + +fun is_deleteSCR where + "is_deleteSCR (deleteSCR u r p) = True" + |"is_deleteSCR x = False" + +fun is_addLR where + "is_addLR (addLR u r lrid lr us) = True" + |"is_addLR x = False" + +fun is_removeLR where + "is_removeLR (removeLR u r p lr) = True" + |"is_removeLR x = False" + +fun is_editEntry where + "is_editEntry (editEntry u r p e_id s) = True" + |"is_editEntry x = False" + +fun SCROp :: "(Operation \ DB) \ SCR" where + "SCROp ((createSCR 
u r p), S) = S p" + |"SCROp ((appendEntry u r p ei e), S) = S p" + |"SCROp ((deleteEntry u r p e_id), S) = S p" + |"SCROp ((readEntry u r p e), S) = S p" + |"SCROp ((readSCR u r p), S) = S p" + |"SCROp ((changeStatus u r p s ei),S) = S p" + |"SCROp ((deleteSCR u r p),S) = S p" + |"SCROp ((editEntry u r p e_id s),S) = S p" + |"SCROp x = \" + +fun patientOfOp :: "Operation \ patient" where + "patientOfOp (createSCR u r p) = p" + |"patientOfOp (appendEntry u r p e ei) = p" + |"patientOfOp (deleteEntry u r p e_id) = p" + |"patientOfOp (readEntry u r p e) = p" + |"patientOfOp (readSCR u r p) = p" + |"patientOfOp (changeStatus u r p s ei) = p" + |"patientOfOp (deleteSCR u r p) = p" + |"patientOfOp (addLR u r p lr ei) = p" + |"patientOfOp (removeLR u r p lr) = p" + |"patientOfOp (editEntry u r p e_id s) = p" + +fun userOfOp :: "Operation \ user" where + "userOfOp (createSCR u r p) = u" + |"userOfOp (appendEntry u r p e ei) = u" + |"userOfOp (deleteEntry u r p e_id) = u" + |"userOfOp (readEntry u r p e) = u" + |"userOfOp (readSCR u r p) = u" + |"userOfOp (changeStatus u r p s ei) = u" + |"userOfOp (deleteSCR u r p) = u" + |"userOfOp (editEntry u r p e_id s) = u" + |"userOfOp (addLR u r p lr ei) = u" + |"userOfOp (removeLR u r p lr) = u" + +fun roleOfOp :: "Operation \ role" where + "roleOfOp (createSCR u r p) = r" + |"roleOfOp (appendEntry u r p e ei) = r" + |"roleOfOp (deleteEntry u r p e_id) = r" + |"roleOfOp (readEntry u r p e) = r" + |"roleOfOp (readSCR u r p) = r" + |"roleOfOp (changeStatus u r p s ei) = r" + |"roleOfOp (deleteSCR u r p) = r" + |"roleOfOp (editEntry u r p e_id s) = r" + |"roleOfOp (addLR u r p lr ei) = r" + |"roleOfOp (removeLR u r p lr) = r" + +fun contentOfOp :: "Operation \ data" where + "contentOfOp (appendEntry u r p ei e) = (snd (snd e))" + |"contentOfOp (editEntry u r p ei e) = (snd (snd e))" + +fun contentStatic :: "Operation \ bool" where + "contentStatic (appendEntry u r p ei e) = ((snd (snd e)) = dummyContent)" + |"contentStatic (editEntry 
u r p ei e) = ((snd (snd e)) = dummyContent)" + |"contentStatic x = True" + +fun allContentStatic where + "allContentStatic (x#xs) = (contentStatic x \ allContentStatic xs)" + |"allContentStatic [] = True" + + +section{* Modelling Access Control*} +text {* + In the following, we define a rather complex access control model for our + scenario that extends traditional role-based access control + (RBAC)~\cite{sandhu.ea:role-based:1996} with treatment relationships and sealed + envelopes. Sealed envelopes (see~\cite{bruegger:generation:2012} for details) + are a variant of break-the-glass access control (see~\cite{brucker.ea:extending:2009} + for a general motivation and explanation of break-the-glass access control). +*} + +subsection {* Sealed Envelopes *} + +type_synonym SEPolicy = "(Operation \ DB \ unit) " + +definition get_entry:: "DB \ patient \ entry_id \ entry option" where + "get_entry S p e_id = (case S p of \ \ \ + | \Scr\ \ Scr e_id)" + +definition userHasAccess:: "user \ entry \ bool" where + "userHasAccess u e = ((fst e) = Open \ (fst (snd e) = u))" + +definition readEntrySE :: SEPolicy where + "readEntrySE x = (case x of (readEntry u r p e_id,S) \ (case get_entry S p e_id of + \ \ \ + | \e\ \ (if (userHasAccess u e) + then \allow ()\ + else \deny ()\ )) + | x \ \)" + +definition deleteEntrySE :: SEPolicy where + "deleteEntrySE x = (case x of (deleteEntry u r p e_id,S) \ (case get_entry S p e_id of + \ \ \ + | \e\ \ (if (userHasAccess u e) + then \allow ()\ + else \deny ()\)) + | x \ \)" + +definition editEntrySE :: SEPolicy where + "editEntrySE x = (case x of (editEntry u r p e_id s,S) \ (case get_entry S p e_id of + \ \ \ + | \e\ \ (if (userHasAccess u e) + then \allow ()\ + else \deny ()\)) + | x \ \)" + +definition SEPolicy :: SEPolicy where + "SEPolicy = editEntrySE \ deleteEntrySE \ readEntrySE \ A\<^sub>U" + + +lemmas SEsimps = SEPolicy_def get_entry_def userHasAccess_def + editEntrySE_def deleteEntrySE_def readEntrySE_def + +subsection {* 
Legitimate Relationships *} + +type_synonym LRPolicy = "(Operation \ \, unit) policy" + +fun hasLR:: "user \ patient \ \ \ bool" where + "hasLR u p \ = (case \ p of \ \ False + | \lrs\ \ (\lr. lr\(ran lrs) \ u \ lr))" + +definition LRPolicy :: LRPolicy where + "LRPolicy = (\(x,y). (if hasLR (userOfOp x) (patientOfOp x) y + then \allow ()\ + else \deny ()\))" + +definition createSCRPolicy :: LRPolicy where + "createSCRPolicy x = (if (is_createSCR (fst x)) + then \allow ()\ + else \)" + +definition addLRPolicy :: LRPolicy where + "addLRPolicy x = (if (is_addLR (fst x)) + then \allow ()\ + else \)" + +definition LR_Policy where "LR_Policy = createSCRPolicy \ addLRPolicy \ LRPolicy \ A\<^sub>U" + +lemmas LRsimps = LR_Policy_def createSCRPolicy_def addLRPolicy_def LRPolicy_def + +type_synonym FunPolicy = "(Operation \ DB \ \,unit) policy" + +fun createFunPolicy :: FunPolicy where + "createFunPolicy ((createSCR u r p),(D,S)) = (if p \ dom D + then \deny ()\ + else \allow ()\)" + |"createFunPolicy x = \" + +fun addLRFunPolicy :: FunPolicy where + "addLRFunPolicy ((addLR u r p l us),(D,S)) = (if l \ dom S + then \deny ()\ + else \allow ()\)" + |"addLRFunPolicy x = \" + +fun removeLRFunPolicy :: FunPolicy where + "removeLRFunPolicy ((removeLR u r p l),(D,S)) = (if l \ dom S + then \allow ()\ + else \deny ()\)" + |"removeLRFunPolicy x = \" + +fun readSCRFunPolicy :: FunPolicy where + "readSCRFunPolicy ((readSCR u r p),(D,S)) = (if p \ dom D + then \allow ()\ + else \deny ()\)" + |"readSCRFunPolicy x = \" + +fun deleteSCRFunPolicy :: FunPolicy where + "deleteSCRFunPolicy ((deleteSCR u r p),(D,S)) = (if p \ dom D + then \allow ()\ + else \deny ()\)" + |"deleteSCRFunPolicy x = \" + +fun changeStatusFunPolicy :: FunPolicy where + "changeStatusFunPolicy (changeStatus u r p e s,(d,S)) = + (case d p of \x\ \ (if e \ dom x + then \allow ()\ + else \deny ()\) + | _ \ \deny ()\)" + |"changeStatusFunPolicy x = \" + +fun deleteEntryFunPolicy :: FunPolicy where + "deleteEntryFunPolicy 
(deleteEntry u r p e,(d,S)) = + (case d p of \x\ \ (if e \ dom x + then \allow ()\ + else \deny ()\) + | _ \ \deny ()\)" + |"deleteEntryFunPolicy x = \" + +fun readEntryFunPolicy :: FunPolicy where + "readEntryFunPolicy (readEntry u r p e,(d,S)) = + (case d p of \x\ \ (if e \ dom x + then \allow ()\ + else \deny ()\) + | _ \ \deny ()\)" + |"readEntryFunPolicy x = \" + +fun appendEntryFunPolicy :: FunPolicy where + "appendEntryFunPolicy (appendEntry u r p e ed,(d,S)) = + (case d p of \x\ \ (if e \ dom x + then \deny ()\ + else \allow ()\) + | _ \ \deny ()\)" + |"appendEntryFunPolicy x = \" + +fun editEntryFunPolicy :: FunPolicy where + "editEntryFunPolicy (editEntry u r p ei e,(d,S)) = + (case d p of \x\ \ (if ei \ dom x + then \allow ()\ + else \deny ()\) + | _ \ \deny ()\)" + |"editEntryFunPolicy x = \" + +definition FunPolicy where + "FunPolicy = editEntryFunPolicy \ appendEntryFunPolicy \ + readEntryFunPolicy \ deleteEntryFunPolicy \ + changeStatusFunPolicy \ deleteSCRFunPolicy \ + removeLRFunPolicy \ readSCRFunPolicy \ + addLRFunPolicy \ createFunPolicy \ A\<^sub>U" + +subsection{* Modelling Core RBAC *} + +type_synonym RBACPolicy = "Operation \ \ \ unit" + +definition RBAC :: "(role \ Operation) set" where + "RBAC = {(r,f). r = Nurse \ is_readEntry f} \ + {(r,f). r = Nurse \ is_readSCR f} \ + {(r,f). r = ClinicalPractitioner \ is_appendEntry f} \ + {(r,f). r = ClinicalPractitioner \ is_deleteEntry f} \ + {(r,f). r = ClinicalPractitioner \ is_readEntry f} \ + {(r,f). r = ClinicalPractitioner \ is_readSCR f} \ + {(r,f). r = ClinicalPractitioner \ is_changeStatus f} \ + {(r,f). r = ClinicalPractitioner \ is_editEntry f} \ + {(r,f). r = Clerical \ is_createSCR f} \ + {(r,f). r = Clerical \ is_deleteSCR f} \ + {(r,f). r = Clerical \ is_addLR f} \ + {(r,f). r = Clerical \ is_removeLR f}" + +definition RBACPolicy :: RBACPolicy where + "RBACPolicy = (\ (f,uc). 
+ if ((roleOfOp f,f) \ RBAC \ \roleOfOp f\ = uc (userOfOp f)) + then \allow ()\ + else \deny ()\)" + +section {* The State Transitions and Output Function*} + +subsection{* State Transition *} + +fun OpSuccessDB :: "(Operation \ DB) \ DB" where + "OpSuccessDB (createSCR u r p,S) = (case S p of \ \ \S(p\\)\ + | \x\ \ \S\)" + |"OpSuccessDB ((appendEntry u r p ei e),S) = + (case S p of \ \ \S\ + | \x\ \ ((if ei \ (dom x) + then \S\ + else \S(p \ x(ei\e))\)))" + |"OpSuccessDB ((deleteSCR u r p),S) = (Some (S(p:=\)))" + |"OpSuccessDB ((deleteEntry u r p ei),S) = + (case S p of \ \ \S\ + | \x\ \ Some (S(p\(x(ei:=\)))))" + |"OpSuccessDB ((changeStatus u r p ei s),S) = + (case S p of \ \ \S\ + | \x\ \ (case x ei of + \e\ \ \S(p \ x(ei\(s,snd e)))\ + | \ \ \S\))" + |"OpSuccessDB ((editEntry u r p ei e),S) = + (case S p of \ \\S\ + | \x\ \ (case x ei of + \e\ \ \S(p\(x(ei\(e))))\ + | \ \ \S\))" + |"OpSuccessDB (x,S) = \S\" + + +fun OpSuccessSigma :: "(Operation \ \) \ \" where + "OpSuccessSigma (addLR u r p lr_id us,S) = + (case S p of \lrs\ \ (case (lrs lr_id) of + \ \ \S(p\(lrs(lr_id\us)))\ + | \x\ \ \S\) + | \ \ \S(p\(empty(lr_id\us)))\)" + |"OpSuccessSigma (removeLR u r p lr_id,S) = + (case S p of Some lrs \ \S(p\(lrs(lr_id:=\)))\ + | \ \ \S\)" + |"OpSuccessSigma (x,S) = \S\" + + + +fun OpSuccessUC :: "(Operation \ \) \ \" where + "OpSuccessUC (f,u) = \u\" + +subsection {* Output *} + +type_synonym Output = unit + +fun OpSuccessOutput :: "(Operation) \ Output" where + "OpSuccessOutput x = \()\" + + +fun OpFailOutput :: "Operation \ Output" where + "OpFailOutput x = \()\" + +section {* Combine All Parts *} + +definition SE_LR_Policy :: "(Operation \ DB \ \, unit) policy" where + "SE_LR_Policy = (\(x,x). x) o\<^sub>f (SEPolicy \\<^sub>\\<^sub>D LR_Policy) o (\(a,b,c). ((a,b),a,c))" + +definition SE_LR_FUN_Policy :: "(Operation \ DB \ \, unit) policy" where + "SE_LR_FUN_Policy = ((\(x,x). x) o\<^sub>f (FunPolicy \\<^sub>\\<^sub>D SE_LR_Policy) o (\a. 
(a,a)))" + +definition SE_LR_RBAC_Policy :: "(Operation \ DB \ \ \ \, unit) policy" where + "SE_LR_RBAC_Policy = (\(x,x). x) + o\<^sub>f (RBACPolicy \\<^sub>\\<^sub>D SE_LR_FUN_Policy) + o (\(a,b,c,d). ((a,d),(a,b,c)))" + +definition ST_Allow :: "Operation \ DB \ \ \ \ \ Output \ DB \ \ \ \" +where "ST_Allow = ((OpSuccessOutput \\<^sub>M (OpSuccessDB \\<^sub>S OpSuccessSigma \\<^sub>S OpSuccessUC)) + o ( (\(a,b,c). ((a),(a,b,c)))))" + + +definition ST_Deny :: "Operation \ DB \ \ \ \ \ Output \ DB \ \ \ \" +where "ST_Deny = (\ (ope,sp,si,uc). Some ((), sp,si,uc))" + + +definition SE_LR_RBAC_ST_Policy :: "Operation \ DB \ \ \ \ \ Output \ DB \ \ \ \" +where "SE_LR_RBAC_ST_Policy = ((\ (x,y).y) + o\<^sub>f ((ST_Allow,ST_Deny) \\<^sub>\ SE_LR_RBAC_Policy) + o (\x.(x,x)))" + +definition PolMon :: "Operation \ (Output decision,DB \ \ \ \) MON\<^sub>S\<^sub>E" +where "PolMon = (policy2MON SE_LR_RBAC_ST_Policy)" + +end diff --git a/ServiceExample.thy b/ServiceExample.thy new file mode 100644 index 0000000..5260836 --- /dev/null +++ b/ServiceExample.thy @@ -0,0 +1,135 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * This file is part of HOL-TestGen. + * + * Copyright (c) 2010-2012 ETH Zurich, Switzerland + * 2010-2014 Achim D. Brucker, Germany + * 2010-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************)
+(* $Id: ServiceExample.thy 10954 2014-11-24 12:43:29Z wolff $ *)
+
+header {* Instantiating Our Secure Service Example *}
+theory
+  ServiceExample
+imports
+  Service
+begin
+text {*
+  In the following, we briefly present an instantiation of our secure service example
+  from the last section.
We assume three different members of the health care staff and + two patients: +*} + +section {* Access Control Configuration *} +definition alice :: user where "alice = 1" +definition bob :: user where "bob = 2" +definition charlie :: user where "charlie = 3" +definition patient1 :: patient where "patient1 = 5" +definition patient2 :: patient where "patient2 = 6" + +definition UC0 :: \ where + "UC0 = empty(alice\Nurse)(bob\ClinicalPractitioner)(charlie\Clerical)" + +definition entry1 :: entry where + "entry1 = (Open,alice, dummyContent)" + +definition entry2 :: entry where + "entry2 = (Closed,bob, dummyContent)" + +definition entry3 :: entry where + "entry3 = (Closed,alice, dummyContent)" + +definition SCR1 :: SCR where + "SCR1 = (Map.empty(1\entry1))" + +definition SCR2 :: SCR where + "SCR2 = (Map.empty)" + +definition Spine0 :: DB where + "Spine0 = empty(patient1\SCR1)(patient2\SCR2)" + +definition LR1 :: LR where + "LR1 =(empty(1\{alice}))" + +definition \0 :: \ where + "\0 = (empty(patient1\LR1))" + +section {* The Initial System State *} +definition \0 :: "DB \ \\\" where + "\0 = (Spine0,\0,UC0)" + +section{* Basic Properties *} + +lemma [simp]: "(case a of allow d \ \X\ | deny d2 \ \Y\) = \ \ False" + by (case_tac a,simp_all) + + +lemma [cong,simp]: + "((if hasLR urp1_alice 1 \0 then \allow ()\ else \deny ()\) = \) = False" +by (simp) + +lemmas MonSimps = valid_SE_def unit_SE_def bind_SE_def +lemmas Psplits = option.splits unit.splits prod.splits decision.splits +lemmas PolSimps = valid_SE_def unit_SE_def bind_SE_def if_splits policy2MON_def + SE_LR_RBAC_ST_Policy_def map_add_def id_def LRsimps prod_2_def RBACPolicy_def + SE_LR_Policy_def SEPolicy_def RBAC_def deleteEntrySE_def editEntrySE_def + readEntrySE_def \0_def \0_def UC0_def patient1_def patient2_def LR1_def + alice_def bob_def charlie_def get_entry_def SE_LR_RBAC_Policy_def Allow_def + Deny_def dom_restrict_def policy_range_comp_def prod_orA_def prod_orD_def + ST_Allow_def ST_Deny_def Spine0_def 
SCR1_def SCR2_def entry1_def entry2_def + entry3_def FunPolicy_def SE_LR_FUN_Policy_def o_def image_def UPFDefs + +lemma "SE_LR_RBAC_Policy ((createSCR alice Clerical patient1),\0)= Some (deny ())" +by (simp add: PolSimps) + +lemma exBool[simp]: "\a\bool. a" by auto + +lemma deny_allow[simp]: " \deny ()\ \ Some ` range allow" by auto + +lemma allow_deny[simp]: " \allow ()\ \ Some ` range deny" by auto + +text{* Policy as monad. Alice using her first urp can read the SCR of patient1. *} +lemma +"(\0 \ (os \ mbind [(createSCR alice Clerical patient1)] (PolMon); + (return (os = [(deny (Out) )]))))" +by (simp add: PolMon_def MonSimps PolSimps) + +text{* Presenting her other urp, she is not allowed to read it. *} +lemma "SE_LR_RBAC_Policy ((appendEntry alice Clerical patient1 ei d),\0)= \deny ()\" +by (simp add: PolSimps) + +end + + diff --git a/UPF.thy b/UPF.thy new file mode 100644 index 0000000..01136d3 --- /dev/null +++ b/UPF.thy @@ -0,0 +1,59 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * UPF + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: UPF.thy 10879 2014-10-26 11:35:31Z brucker $ *) + +header {* Putting Everything Together: UPF *} +theory + UPF +imports + Normalisation + NormalisationTestSpecification + Analysis +begin + +text{* + This is the top-level theory for the Unified Policy Framework (UPF) and, thus, + builds the base theory for using UPF. For the moment, we only define a set of + lemmas for all core UPF definitions that is useful for using UPF: +*} +lemmas UPFDefs = UPFCoreDefs ParallelDefs ElementaryPoliciesDefs +end diff --git a/UPFCore.thy b/UPFCore.thy new file mode 100644 index 0000000..5df141b --- /dev/null +++ b/UPFCore.thy @@ -0,0 +1,400 @@ +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * UPF + * This file is part of HOL-TestGen. 
+ * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ ******************************************************************************) +(* $Id: UPFCore.thy 10951 2014-11-21 21:54:46Z wolff $ *) + +header{* The Core of the Unified Policy Framework (UPF) *} +theory + UPFCore +imports + Monads +begin + + +section{* Foundation *} +text{* + The purpose of this theory is to formalize a somewhat non-standard view + on the fundamental concept of a security policy which is worth outlining. + This view has arisen from prior experience in the modelling of network + (firewall) policies. Instead of regarding policies as relations on resources, + sets of permissions, etc., we emphasise the view that a policy is a policy + decision function that grants or denies access to resources, permissions, etc. + In other words, we model the concrete function that implements the policy + decision point in a system, and which represents a "wrapper" around the + business logic. An advantage of this view is that it is compatible + with many different policy models, enabling a uniform modelling framework to be + defined. Furthermore, this function is typically a large cascade of nested + conditionals, using conditions referring to an internal state and security + contexts of the system or a user. This cascade of conditionals can easily be + decomposed into a set of test cases similar to transformations used for binary + decision diagrams (BDD), and motivate equivalence class testing for unit test and + sequence test scenarios. From the modelling perspective, using \HOL as + its input language, we will consequently use the expressive power of its + underlying functional programming language, including the possibility to + define higher-order combinators. + + In more detail, we model policies as partial functions based on input + data $\alpha$ (arguments, system state, security context, ...) 
to output + data $\beta$: +*} + +datatype '\ decision = allow '\ | deny '\ + +type_synonym ('\,'\) policy = "'\ \ '\ decision" (infixr "|->" 0) + +text{*In the following, we introduce a number of shortcuts and alternative notations. +The type of policies is represented as: *} + +translations (type) "'\ |-> '\" <= (type) "'\ \ '\ decision" +type_notation (xsymbols) "policy" (infixr "\" 0) + +text{* ... allowing the notation @{typ "'\ \ '\"} for the policy type and the +alternative notations for @{term None} and @{term Some} of the \HOL library +@{typ "'\ option"} type:*} + +notation "None" ("\") +notation "Some" ("\_\" 80) + +text{* Thus, the range of a policy may consist of @{term "\accept x\"} data, + of @{term "\deny x\"} data, as well as @{term "\"} modeling the undefinedness + of a policy, i.e. a policy is considered as a partial function. Partial + functions are used since we describe elementary policies by partial system + behaviour, which are glued together by operators such as function override and + functional composition. +*} + +text{* We define the two fundamental sets, the allow-set $Allow$ and the + deny-set $Deny$ (written $A$ and $D$ set for short), to characterize these + two main sets of the range of a policy. +*} +definition Allow :: "('\ decision) set" +where "Allow = range allow" + +definition Deny :: "('\ decision) set" +where "Deny = range deny" + + +section{* Policy Constructors *} +text{* + Most elementary policy constructors are based on the + update operation @{thm [source] "Fun.fun_upd_def"} @{thm Fun.fun_upd_def} + and the maplet-notation @{term "a(x \ y)"} used for @{term "a(x:=\y\)"}. 
+ + Furthermore, we add notation adopted to our problem domain: *} + +nonterminal policylets and policylet + +syntax + "_policylet1" :: "['a, 'a] => policylet" ("_ /+=/ _") + "_policylet2" :: "['a, 'a] => policylet" ("_ /-=/ _") + "" :: "policylet => policylets" ("_") + "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") + "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") + "_MapUpd" :: "['a |-> 'b, policylets] => 'a |-> 'b" ("_/'(_')" [900,0]900) + +syntax (xsymbols) + "_policylet1" :: "['a, 'a] => policylet" ("_ /\\<^sub>+/ _") + "_policylet2" :: "['a, 'a] => policylet" ("_ /\\<^sub>-/ _") + "_emptypolicy" :: "'a |-> 'b" ("\") + +translations + "_MapUpd m (_Maplets xy ms)" \ "_MapUpd (_MapUpd m xy) ms" + "_MapUpd m (_policylet1 x y)" \ "m(x := CONST Some (CONST allow y))" + "_MapUpd m (_policylet2 x y)" \ "m(x := CONST Some (CONST deny y))" + "\" \ "CONST empty" + +text{* Here are some lemmas essentially showing syntactic equivalences: *} +lemma test: "empty(x+=a, y-= b) = \(x \\<^sub>+ a, y \\<^sub>- b)" by simp + +lemma test2: "p(x\\<^sub>+a,x\\<^sub>-b) = p(x\\<^sub>-b)" by simp + +text{* + We inherit a fairly rich theory on policy updates from Map here. Some examples are: +*} + +lemma pol_upd_triv1: "t k = \allow x\ \ t(k\\<^sub>+x) = t" + by (rule ext) simp + +lemma pol_upd_triv2: "t k = \deny x\ \ t(k\\<^sub>-x) = t" + by (rule ext) simp + +lemma pol_upd_allow_nonempty: "t(k\\<^sub>+x) \ \" + by simp + +lemma pol_upd_deny_nonempty: "t(k\\<^sub>-x) \ \" + by simp + +lemma pol_upd_eqD1 : "m(a\\<^sub>+x) = n(a\\<^sub>+y) \ x = y" + by(auto dest: map_upd_eqD1) + +lemma pol_upd_eqD2 : "m(a\\<^sub>-x) = n(a\\<^sub>-y) \ x = y" + by(auto dest: map_upd_eqD1) + +lemma pol_upd_neq1 [simp]: "m(a\\<^sub>+x) \ n(a\\<^sub>-y)" + by(auto dest: map_upd_eqD1) + + +section{* Override Operators *} +text{* + Key operators for constructing policies are the override operators. 
There are four different + versions of them, with one of them being the override operator from the Map theory. As it is + common to compose policy rules in a ``left-to-right-first-fit''-manner, that one is taken as + default, defined by a syntax translation from the provided override operator from the Map + theory (which does it in reverse order). +*} + +syntax + "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "(+p/)" 100) + +syntax (xsymbols) + "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\" 100) + +translations + "p \ q" \ "q ++ p" + + +text{* + Some elementary facts inherited from Map are: +*} + +lemma override_empty: "p \ \ = p" + by simp + +lemma empty_override: "\ \ p = p" + by simp + +lemma override_assoc: "p1 \ (p2 \ p3) = (p1 \ p2) \ p3" + by simp + +text{* + The following two operators are variants of the standard override. For override\_A, + an allow of wins over a deny. For override\_D, the situation is dual. +*} + +definition override_A :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_A" 100) +where "m2 ++_A m1 = + (\x. (case m1 x of + \allow a\ \ \allow a\ + | \deny a\ \ (case m2 x of \allow b\ \ \allow b\ + | _ \ \deny a\) + | \ \ m2 x) + )" + +syntax (xsymbols) + "_policyoverride_A" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>A" 100) + +translations + "p \\<^sub>A q" \ "p ++_A q" + +lemma override_A_empty[simp]: "p \\<^sub>A \ = p" + by(simp add:override_A_def) + +lemma empty_override_A[simp]: "\ \\<^sub>A p = p" + apply (rule ext) + apply (simp add:override_A_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac a) + apply (simp_all) +done + + +lemma override_A_assoc: "p1 \\<^sub>A (p2 \\<^sub>A p3) = (p1 \\<^sub>A p2) \\<^sub>A p3" + by (rule ext, simp add: override_A_def split: decision.splits option.splits) + +definition override_D :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_D" 100) +where "m1 ++_D m2 = + (\x. 
case m2 x of + \deny a\ \ \deny a\ + | \allow a\ \ (case m1 x of \deny b\ \ \deny b\ + | _ \ \allow a\) + | \ \ m1 x + )" + +syntax (xsymbols) + "_policyoverride_D" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>D" 100) +translations + "p \\<^sub>D q" \ "p ++_D q" + +lemma override_D_empty[simp]: "p \\<^sub>D \ = p" + by(simp add:override_D_def) + +lemma empty_override_D[simp]: "\ \\<^sub>D p = p" + apply (rule ext) + apply (simp add:override_D_def) + apply (case_tac "p x", simp_all) + apply (case_tac a, simp_all) +done + +lemma override_D_assoc: "p1 \\<^sub>D (p2 \\<^sub>D p3) = (p1 \\<^sub>D p2) \\<^sub>D p3" + apply (rule ext) + apply (simp add: override_D_def split: decision.splits option.splits) +done + +section{* Coercion Operators *} +text{* + Often, especially when combining policies of different type, it is necessary to + adapt the input or output domain of a policy to a more refined context. +*} + +text{* + An analogous for the range of a policy is defined as follows: +*} + +definition policy_range_comp :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o'_f" 55) +where + "f o_f p = (\x. case p x of + \allow y\ \ \allow (f y)\ + | \deny y\ \ \deny (f y)\ + | \ \ \)" + +syntax (xsymbols) + "_policy_range_comp" :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o\<^sub>f" 55) + +translations + "p o\<^sub>f q" \ "p o_f q" + +lemma policy_range_comp_strict : "f o\<^sub>f \ = \" + apply (rule ext) + apply (simp add: policy_range_comp_def) +done + + +text{* + A generalized version is, where separate coercion functions are applied to the result + depending on the decision of the policy is as follows: +*} + +definition range_split :: "[('\\'\)\('\\'\),'\ \ '\] \ '\ \ '\" + (infixr "\" 100) +where "(P) \ p = (\x. case p x of + \allow y\ \ \allow ((fst P) y)\ + | \deny y\ \ \deny ((snd P) y)\ + | \ \ \)" + +lemma range_split_strict[simp]: "P \ \ = \" + apply (rule ext) + apply (simp add: range_split_def) +done + + +lemma range_split_charn: + "(f,g) \ p = (\x. 
case p x of + \allow x\ \ \allow (f x)\ + | \deny x\ \ \deny (g x)\ + | \ \ \)" + apply (simp add: range_split_def) + apply (rule ext) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + +text{* + The connection between these two becomes apparent if considering the following lemma: +*} + +lemma range_split_vs_range_compose: "(f,f) \ p = f o\<^sub>f p" + by(simp add: range_split_charn policy_range_comp_def) + +lemma range_split_id [simp]: "(id,id) \ p = p" + apply (rule ext) + apply (simp add: range_split_charn id_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + +lemma range_split_bi_compose [simp]: "(f1,f2) \ (g1,g2) \ p = (f1 o g1,f2 o g2) \ p" + apply (rule ext) + apply (simp add: range_split_charn comp_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + + +text{* + The next three operators are rather exotic and in most cases not used. +*} + +text {* + The following is a variant of range\_split, where the change in the decision depends + on the input instead of the output. +*} + +definition dom_split2a :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\a" 100) +where "P \a p = (\x. case p x of + \allow y\ \ \allow (the ((fst P) x))\ + | \deny y\ \ \deny (the ((snd P) x))\ + | \ \ \)" + +definition dom_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\" 100) +where "P \ p = (\x. case p x of + \allow y\ \ \allow ((fst P) x)\ + | \deny y\ \ \deny ((snd P) x)\ + | \ \ \)" + +definition range_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ ('\ \'\)" (infixr "\2" 100) +where "P \2 p = (\x. case p x of + \allow y\ \ \allow (y,(fst P) x)\ + | \deny y\ \ \deny (y,(snd P) x)\ + | \ \ \)" + +text{* + The following operator is used for transition policies only: a transition policy is transformed + into a state-exception monad. 
Such a monad can for example be used for test case generation using + HOL-Testgen~\cite{brucker.ea:theorem-prover:2012}. +*} + +definition policy2MON :: "('\\'\ \ 'o\'\) \ ('\ \('o decision,'\) MON\<^sub>S\<^sub>E)" +where "policy2MON p = (\ \ \. case p (\,\) of + \(allow (outs,\'))\ \ \(allow outs, \')\ + | \(deny (outs,\'))\ \ \(deny outs, \')\ + | \ \ \)" + +lemmas UPFCoreDefs = Allow_def Deny_def override_A_def override_D_def policy_range_comp_def + range_split_def dom_split2_def map_add_def restrict_map_def +end + diff --git a/config b/config new file mode 100644 index 0000000..5614d14 --- /dev/null +++ b/config @@ -0,0 +1,9 @@ +# -*- shell-script -*- + +# Get email when automated build fails. May be empty. +# values: "email1 email2 .. emailn" +NOTIFY="adbrucker@0x5f.org wolff@lri.fr lukas.a.bruegger@gmail.com" + +# Participate in frequent (nightly) build (only for small submissions) +# values: "yes" "no" +FREQUENT="no" diff --git a/document/auto/root.el b/document/auto/root.el new file mode 100644 index 0000000..7fbeb3b --- /dev/null +++ b/document/auto/root.el @@ -0,0 +1,25 @@ +(TeX-add-style-hook "root" + (lambda () + (LaTeX-add-bibitems + "brucker.ea:formal-fw-testing:2014" + "brucker.ea:hol-testgen-fw:2013" + "brucker.ea:model-based:2011" + "brucker.ea:theorem-prover:2012" + "bruegger:generation:2012" + "barker:next:2009" + "sandhu.ea:role-based:1996" + "wainer.ea:dw-rbac:2007" + "sandhu.ea:nist:2000" + "samuel.ea:context-aware:2008" + "bertino.ea:trbac:2001" + "moyer.ea:generalized:2001" + "bell.ea:secure:1996" + "bell:looking:2005" + "oasis:xacml:2005" + "ferreira.ea:how:2009" + "ansi:rbac:2004" + "li.ea:critique:2007" + "ardagna.ea:access:2010" + "sandhu.ea:arbac97:1999" + "becker:information:2007"))) + diff --git a/document/conclusion.tex b/document/conclusion.tex new file mode 100644 index 0000000..a503eb6 --- /dev/null +++ b/document/conclusion.tex @@ -0,0 +1,40 @@ +\chapter{Conclusion and Related Work} +\section{Related Work} +With 
\citet{barker:next:2009}, our UPF shares the observation
+that a broad range of access control models can be reduced to a
+surprisingly small number of primitives together with a set of
+combinators or relations to build more complex policies. We also share
+the vision that the semantics of access control models should be
+formally defined. In contrast to \cite{barker:next:2009}, UPF
+uses higher-order constructs and, more importantly, is geared towards
+machine support for (formally) transforming policies and supporting
+model-based test case generation approaches.
+
+\section{Conclusion and Future Work}
+We have presented a uniform framework for modelling security
+policies. This might be regarded as merely an interesting academic
+exercise in the art of abstraction, especially given the fact that
+underlying core concepts are logically equivalent, but presented
+remarkably different from---apparently simple---security textbook
+formalisations. However, we have successfully used the framework to
+model fully the large and complex information governance policy of a
+national health-care record system as described in the official
+documents~\cite{brucker.ea:model-based:2011} as well as network
+policies~\cite{brucker.ea:formal-fw-testing:2014}. Thus, we have shown
+that the framework is able to accommodate relatively conventional
+RBAC~\cite{sandhu.ea:role-based:1996} mechanisms alongside less common
+ones such as Legitimate Relationships. These security concepts are
+modelled separately and combined into one global access control
+mechanism. Moreover, we have shown the practical relevance of our
+model by using it in our test generation system
+\testgen~\cite{brucker.ea:theorem-prover:2012}, translating informal
+security requirements into formal test specifications to be processed
+to test sequences for a distributed system consisting of applications
+accessing a central record storage system.
+ +Besides applying our framework to other access control models, we plan +to develop specific test case generation algorithms. Such +domain-specific algorithms allow, by exploiting knowledge about the +structure of access control models, respectively the UPF, for a +deeper exploration of the test space. Finally, this results in an +improved test coverage. diff --git a/document/example-intro.tex b/document/example-intro.tex new file mode 100644 index 0000000..b30dc91 --- /dev/null +++ b/document/example-intro.tex @@ -0,0 +1,10 @@ +In this chapter, we present a small example application of UPF for +modeling access control for a Web service that might be used in a +hospital. This scenario is motivated by our formalization of the NHS +system~\cite{bruegger:generation:2012,brucker.ea:model-based:2011}. + +UPF was also successfully used for modeling network security policies +such as the ones enforced by +firewalls~\cite{bruegger:generation:2012,brucker.ea:formal-fw-testing:2014}. These +models were also used for generating test cases using +HOL-TestGen~\cite{brucker.ea:theorem-prover:2012}. diff --git a/document/introduction.tex b/document/introduction.tex new file mode 100644 index 0000000..deb54ef --- /dev/null +++ b/document/introduction.tex @@ -0,0 +1,55 @@ +\chapter{Introduction} +Access control, \ie, restricting the access to information or +resources, is an important pillar of today's information security +portfolio. Thus the large number of access control models +(\eg,~\cite{moyer.ea:generalized:2001,sandhu.ea:arbac97:1999, + sandhu.ea:nist:2000,ansi:rbac:2004,bell:looking:2005,bell.ea:secure:1996,oasis:xacml:2005,li.ea:critique:2007}) +and variants thereof +(\eg,~\cite{ferreira.ea:how:2009,wainer.ea:dw-rbac:2007,ardagna.ea:access:2010,samuel.ea:context-aware:2008,bertino.ea:trbac:2001,ardagna.ea:access:2010,becker:information:2007}) +is not surprising. 
On the one hand, this variety of specialized access
+control models allows concise representation of access control
+policies. On the other hand, the lack of a common foundation makes it
+difficult to compare and analyze different access control models
+formally.
+
+We present a formalization of the Unified Policy Framework
+(UPF)~\cite{bruegger:generation:2012} that provides a formal semantics
+for the core concepts of access control policies. It can serve as a
+meta-model for a large set of well-known access control
+policies and moreover, serve as a framework for analysis and test
+generation tools addressing common ground in policy models.
+Thus, UPF allows for comparing different access control models,
+including a formal correctness proof of a specific embedding, for
+example, implementing a role-based access control policy in terms of a
+discretionary access enforcement architecture. Moreover, defining
+well-known access control models by instantiating a unified policy
+framework allows one to re-use tools, such as test-case generators, that
+are already provided for the unified policy framework. As the
+instantiation of a unified policy framework may also define a
+domain-specific (\ie, access control model specific) set of policy
+combinators (syntax), such an approach still provides the usual
+notations and thus a concise representation of access control
+policies.
+
+UPF was already successfully used as a basis for large scale access
+control policies in the health care
+domain~\cite{brucker.ea:model-based:2011} as well as in the domain of
+firewall and router
+policies~\cite{brucker.ea:formal-fw-testing:2014}. In both domains,
+the formal policy specifications served as basis for the generation,
+using {HOL-TestGen}~\cite{brucker.ea:theorem-prover:2012}, of test
+cases that can be used for validating the compliance of an
+implementation to the formal model.
UPF is based on the following four +principles: +\begin{enumerate} +\item policies are represented as \emph{functions} (rather than relations), +\item policy combination avoids conflicts by construction, +\item the decision type is three-valued (allow, deny, undefined), +\item the output type does not only contain the decision but also a + `slot' for arbitrary result data. +\end{enumerate} + +UPF is related to the state-exception monad modeling failing computations; +in some cases our UPF model makes explicit use of this connection, although it +is not central. The used theory for state-exception monads can be found in the +appendix. diff --git a/document/root.bib b/document/root.bib new file mode 100644 index 0000000..520d028 --- /dev/null +++ b/document/root.bib @@ -0,0 +1,701 @@ +@PREAMBLE{ {\providecommand{\ac}[1]{\textsc{#1}} } + # {\providecommand{\acs}[1]{\textsc{#1}} } + # {\providecommand{\acf}[1]{\textsc{#1}} } + # {\providecommand{\TAP}{T\kern-.1em\lower-.5ex\hbox{A}\kern-.1em P} } + # {\providecommand{\leanTAP}{\mbox{\sf lean\it\TAP}} } + # {\providecommand{\holz}{\textsc{hol-z}} } + # {\providecommand{\holocl}{\textsc{hol-ocl}} } + # {\providecommand{\isbn}{\textsc{isbn}} } + # {\providecommand{\Cpp}{C++} } + # {\providecommand{\Specsharp}{Spec\#} } + # {\providecommand{\doi}[1]{\href{http://dx.doi.org/#1}{doi: + {\urlstyle{rm}\nolinkurl{#1}}}}} } +@STRING{conf-sacmat="ACM symposium on access control models and technologies + (SACMAT)" } +@STRING{j-computer="Computer" } +@STRING{j-fac = "Formal Aspects of Computing (FAC)" } +@STRING{j-stvr = "Software Testing, Verification \& Reliability (STVR)" } +@STRING{j-tissec= "ACM Transactions on Information and System Security" } +@STRING{proc = "Proceedings of the " } +@STRING{pub-acm = {ACM Press} } +@STRING{pub-acm:adr={New York, NY USA} } +@STRING{pub-elsevier={Elsevier Science Publishers} } +@STRING{pub-ieee= {IEEE Computer Society} } +@STRING{pub-ieee:adr={Los Alamitos, CA, USA} } 
+@STRING{pub-springer={Springer-Verlag} } +@STRING{pub-wiley={John Wiley \& Sons} } +@STRING{s-lncs = "Lecture Notes in Computer Science" } + +@Article{ brucker.ea:formal-fw-testing:2014, + abstract = {Firewalls are an important means to secure critical ICT + infrastructures. As configurable off-the-shelf prod\-ucts, + the effectiveness of a firewall crucially depends on both + the correctness of the implementation itself as well as the + correct configuration. While testing the implementation can + be done once by the manufacturer, the configuration needs + to be tested for each application individually. This is + particularly challenging as the configuration, implementing + a firewall policy, is inherently complex, hard to + understand, administrated by different stakeholders and + thus difficult to validate. This paper presents a formal + model of both stateless and stateful firewalls (packet + filters), including NAT, to which a specification-based + conformance test case gen\-eration approach is applied. + Furthermore, a verified optimisation technique for this + approach is presented: starting from a formal model for + stateless firewalls, a collection of semantics-preserving + policy transformation rules and an algorithm that optimizes + the specification with respect of the number of test cases + required for path coverage of the model are derived. We + extend an existing approach that integrates verification + and testing, that is, tests and proofs to support + conformance testing of network policies. The presented + approach is supported by a test framework that allows to + test actual firewalls using the test cases generated on the + basis of the formal model. Finally, a report on several + larger case studies is presented.}, + address = {pub-wiley:adr}, + author = {Achim D. 
Brucker and Lukas Br{\"u}gger and Burkhart Wolff}, + doi = {10.1002/stvr.1544}, + journal = {Software Testing, Verification \& Reliability (STVR)}, + keywords = {model-based testing; conformance testing; security + testing; firewall; specification-based testing; testing + cloud infrastructure, transformation for testability; + HOL-TestGen; test and proof; security configuration + testing}, + language = {USenglish}, + pdf = {http://www.brucker.ch/bibliography/download/2014/brucker.ea-formal-fw-testing-2014.pdf} + , + publisher = {pub-wiley}, + title = {Formal Firewall Conformance Testing: An Application of + Test and Proof Techniques}, + url = {http://www.brucker.ch/bibliography/abstract/brucker.ea-formal-fw-testing-2014} + , + year = {2014} +} + +@InCollection{ brucker.ea:hol-testgen-fw:2013, + abstract = {The HOL-TestGen environment is conceived as a system for + modeling and semi-automated test generation with an + emphasis on expressive power and generality. However, its + underlying technical framework Isabelle/HOL supports the + customization as well as the development of highly + automated add-ons working in specific application + domains.\\\\In this paper, we present HOL-TestGen/fw, an + add-on for the test framework HOL-TestGen, that allows for + testing the conformance of firewall implementations to + high-level security policies. Based on generic theories + specifying a security-policy language, we developed + specific theories for network data and firewall policies. + On top of these firewall specific theories, we provide + mechanisms for policy transformations based on derived + rules and adapted code-generators producing test drivers. + Our empirical evaluations shows that HOL-TestGen/fw is a + competitive environment for testing firewalls or high-level + policies of local networks.}, + address = {Heidelberg}, + author = {Achim D. 
Brucker and Lukas Br{\"u}gger and Burkhart Wolff}, + booktitle = {International Colloquium on Theoretical Aspects of + Computing (ICTAC)}, + doi = {10.1007/978-3-642-39718-9_7}, + editor = {Zhiming Liu and Jim Woodcock and Huibiao Zhu}, + isbn = {978-3-642-39717-2}, + keywords = {symbolic test case generations, black box testing, theorem + proving, network security, firewall testing, conformance + testing}, + language = {USenglish}, + location = {Shanghai}, + number = {8049}, + pages = {112--121}, + pdf = {http://www.brucker.ch/bibliography/download/2013/brucker.ea-hol-testgen-fw-2013.pdf} + , + publisher = {Springer-Verlag}, + series = {Lecture Notes in Computer Science}, + title = {{HOL-TestGen/FW:} An Environment for Specification-based + Firewall Conformance Testing}, + url = {http://www.brucker.ch/bibliography/abstract/brucker.ea-hol-testgen-fw-2013} + , + year = {2013} +} + +@InProceedings{ brucker.ea:model-based:2011, + abstract = {We present a generic modular policy modelling framework + and instantiate it with a substantial case study for + model-based testing of some key security mechanisms of + applications and services of the NPfIT. NPfIT, the National + Programme for IT, is a very large-scale development project + aiming to modernise the IT infrastructure of the NHS in + England. Consisting of heterogeneous and distributed + applications, it is an ideal target for model-based testing + techniques of a large system exhibiting critical security + features.\\\\We model the four information governance + principles, comprising a role-based access control model, + as well as policy rules governing the concepts of patient + consent, sealed envelopes and legitimate relationship. The + model is given in HOL and processed together with suitable + test specifications in the HOL-TestGen system, that + generates test sequences according to them. 
Particular + emphasis is put on the modular description of security + policies and their generic combination and its consequences + for model-based testing.}, + address = {New York, NY, USA}, + author = {Achim D. Brucker and Lukas Br{\"u}gger and Paul Kearney + and Burkhart Wolff}, + booktitle = {ACM symposium on access control models and technologies + (SACMAT)}, + copyright = {ACM}, + copyrighturl = {http://dl.acm.org/authorize?431936}, + doi = {10.1145/1998441.1998461}, + isbn = {978-1-4503-0688-1}, + language = {USenglish}, + location = {Innsbruck, Austria}, + pages = {133--142}, + pdf = {http://www.brucker.ch/bibliography/download/2011/brucker.ea-model-based-2011.pdf} + , + publisher = {ACM Press}, + title = {An Approach to Modular and Testable Security Models of + Real-world Health-care Applications}, + url = {http://www.brucker.ch/bibliography/abstract/brucker.ea-model-based-2011} + , + year = {2011} +} + +@Article{ brucker.ea:theorem-prover:2012, + abstract = {HOL-TestGen is a specification and test case generation + environment extending the interactive theorem prover + Isabelle/HOL. As such, HOL-TestGen allows for an integrated + workflow supporting interactive theorem proving, test case + generation, and test data generation.\\\\The HOL-TestGen + method is two-staged: first, the original formula is + partitioned into test cases by transformation into a normal + form called test theorem. Second, the test cases are + analyzed for ground instances (the test data) satisfying + the constraints of the test cases. Particular emphasis is + put on the control of explicit test-hypotheses which can be + proven over concrete programs.\\\\Due to the generality of + the underlying framework, our system can be used for + black-box unit, sequence, reactive sequence and white-box + test scenarios. Although based on particularly clean + theoretical foundations, the system can be applied for + substantial case-studies.}, + address = {Heidelberg}, + author = {Achim D. 
Brucker and Burkhart Wolff}, + doi = {10.1007/s00165-012-0222-y}, + issn = {0934-5043}, + journal = {Formal Aspects of Computing}, + keywords = {test case generation, domain partitioning, test sequence, + theorem proving, HOL-TestGen}, + language = {USenglish}, + number = {5}, + pages = {683--721}, + pdf = {http://www.brucker.ch/bibliography/download/2012/brucker.ea-theorem-prover-2012.pdf} + , + publisher = {Springer-Verlag}, + title = {On Theorem Prover-based Testing}, + url = {http://www.brucker.ch/bibliography/abstract/brucker.ea-theorem-prover-2012} + , + volume = {25}, + year = {2013} +} + +@PhDThesis{ bruegger:generation:2012, + author = {Lukas Br{\"u}gger}, + title = {A Framework for Modelling and Testing of Security + Policies}, + school = {ETH Zurich}, + year = {2012}, + categories = {holtestgen}, + note = {ETH Dissertation No. 20513.}, + public = yes, + pdf = {http://www.brucker.ch/bibliography/download/bruegger-generation-2012.pdf} + , + url = {http://www.brucker.ch/bibliography/abstract/bruegger-generation-2012} + +} + +@InProceedings{ barker:next:2009, + author = {Steve Barker}, + title = {The next 700 access control models or a unifying + meta-model?}, + booktitle = {Proceedings of the 14th ACM symposium on Access control + models and technologies}, + series = {SACMAT '09}, + year = 2009, + isbn = {978-1-60558-537-6}, + location = {Stresa, Italy}, + pages = {187--196}, + numpages = 10, + doi = {10.1145/1542207.1542238}, + acmid = 1542238, + publisher = pub-acm, + address = pub-acm:adr, + keywords = {access control models, access control policies}, + abstract = {We address some fundamental questions, which were raised + by Atluri and Ferraiolo at SACMAT'08, on the prospects for + and benefits of a meta-model of access control. We + demonstrate that a meta-model for access control can be + defined and that multiple access control models can be + derived as special cases. 
An anticipated consequence of the
+ contribution that we describe is to encourage researchers
+ to adopt a meta-model view of access control rather than
+ them developing the next 700 particular instances of access
+ control models.}
+}
+
+@Article{ sandhu.ea:role-based:1996,
+ author = {Ravi S. Sandhu and Edward J. Coyne and Hal L. Feinstein
+ and Charles E. Youman},
+ title = {Role-Based Access Control Models},
+ journal = j-computer,
+ year = 1996,
+ volume = 29,
+ number = 2,
+ address = pub-ieee:adr,
+ publisher = pub-ieee,
+ pages = {38--47},
+ url = {http://ite.gmu.edu/list/journals/computer/pdf_ver/i94rbac(org).pdf}
+ ,
+ abstract = {Abstract This article introduces a family of reference
+ models for role-based access control (RBAC) in which
+ permissions are associated with roles, and users are made
+ members of appropriate roles. This greatly simplifies
+ management of permissions. Roles are closely related to
+ the concept of user groups in access control. However, a
+ role brings together a set of users on one side and a set
+ of permissions on the other, whereas user groups are
+ typically defined as a set of users only.
+
+ The basic concepts of RBAC originated with early multi-user
+ computer systems. The resurgence of interest in RBAC has
+ been driven by the need for general-purpose customizable
+ facilities for RBAC and the need to manage the
+ administration of RBAC itself. As a consequence RBAC
+ facilities range from simple to complex. 
This article + describes a novel framework of reference models to + systematically addres s the diverse components of RBAC, and + their interactions.}, + issn = {0018-9162}, + keywords = {Computational linguistics; Computer control systems; + Computer simulation; Computer software; Data abstraction; + Database systems; Discretionary access control; Encoding + (symbols); Integration; Mandator access control; Role based + access control; Semantics; Software encoding; User + interfaces}, + acknowledgement={none}, + bibkey = {sandhu.ea:role-based:1996} +} + +@Article{ wainer.ea:dw-rbac:2007, + author = {Jacques Wainer and Akhil Kumar and Paulo Barthelmess}, + title = {DW-RBAC: A formal security model of delegation and + revocation in workflow systems}, + journal = {Inf. Syst.}, + year = 2007, + volume = 32, + number = 3, + pages = {365--384}, + abstract = {One reason workflow systems have been criticized as being + inflexible is that they lack support for delegation. This + paper shows how delegation can be introduced in a workflow + system by extending the role-based access control (RBAC) + model. The current RBAC model is a security mechanism to + implement access control in organizations by allowing users + to be assigned to roles and privileges to be associated + with the roles. Thus, users can perform tasks based on the + privileges possessed by their own role or roles they + inherit by virtue of their organizational position. + However, there is no easy way to handle delegations within + this model. This paper tries to treat the issues + surrounding delegation in workflow systems in a + comprehensive way. We show how delegations can be + incorporated into the RBAC model in a simple and + straightforward manner. The new extended model is called + RBAC with delegation in a workflow context (DW-RBAC). It + allows for delegations to be specified from a user to + another user, and later revoked when the delegation is no + longer required. 
The implications of such specifications + and their subsequent revocations are examined. Several + formal definitions for assertion, acceptance, execution and + revocation are provided, and proofs are given for the + important properties of our delegation framework.}, + issn = {0306-4379}, + doi = {http://dx.doi.org/10.1016/j.is.2005.11.008}, + publisher = pub-elsevier, + address = {Oxford, UK, UK}, + tags = {ReadingList, SoKNOS}, + clearance = {unclassified}, + timestap = {2008-05-26} +} + +@InProceedings{ sandhu.ea:nist:2000, + author = {Ravi S. Sandhu and David F. Ferraiolo and D. Richard + Kuhn}, + title = {The NIST model for role-based access control: towards a + unified standard}, + booktitle = {ACM Workshop on Role-Based Access Control}, + year = 2000, + pages = {47--63}, + doi = {10.1145/344287.344301}, + tags = {ReadingList, AccessControl}, + clearance = {unclassified}, + timestap = {2008-05-26} +} + +@Article{ samuel.ea:context-aware:2008, + author = {Samuel, A. and Ghafoor, A. and Bertino, E.}, + title = {Context-Aware Adaptation of Access-Control Policies}, + journal = {Internet Computing, IEEE}, + year = 2008, + volume = 12, + number = 1, + pages = {51--54}, + abstract = {Today, public-service delivery mechanisms such as + hospitals, police, and fire departments rely on digital + generation, storage, and analysis of vital information. To + protect critical digital resources, these organizations + employ access-control mechanisms, which define rules under + which authorized users can access the resources they need + to perform organizational tasks. Natural or man-made + disasters pose a unique challenge, whereby previously + defined constraints can potentially debilitate an + organization's ability to act. 
Here, the authors propose + employing contextual parameters - specifically, activity + context in the form of emergency warnings - to adapt + access-control policies according to a priori + configuration.}, + keywords = {authorisation, disasters, organisational + aspectsaccess-control policy, context-aware adaptation, + digital resource protection, natural disaster, + organizational task, public-service delivery mechanism}, + doi = {10.1109/MIC.2008.6}, + issn = {1089-7801}, + tags = {ReadingList, AccessControl, SoKNOS}, + clearance = {unclassified}, + timestap = {2008-05-26} +} + +@Article{ bertino.ea:trbac:2001, + author = {Elisa Bertino and Piero Andrea Bonatti and Elena Ferrari}, + title = {TRBAC: A temporal role-based access control model}, + journal = {ACM Trans. Inf. Syst. Secur.}, + volume = 4, + number = 3, + year = 2001, + issn = {1094-9224}, + pages = {191--233}, + doi = {10.1145/501978.501979}, + publisher = pub-acm, + address = pub-acm:adr, + tags = {noTAG}, + clearance = {unclassified}, + timestap = {2008-05-29} +} + +@Article{ moyer.ea:generalized:2001, + title = {Generalized role-based access control}, + author = {Moyer, M.J. and Abamad, M.}, + journal = {Distributed Computing Systems, 2001. 21st International + Conference on.}, + year = 2001, + month = {Apr}, + pages = {391--398}, + keywords = {authorisation, distributed processing, transaction + processingGRBAC, JPEG, RBAC, access control, access control + decisions, access control models, environment roles, + environmental information, expressive power, generalized + role based access control, object roles, object type, rich + access control policies, security policy, security-relevant + characteristics, sensitivity level, subject roles}, + doi = {10.1109/ICDSC.2001.918969}, + abstract = {Generalized Role-Based Access Control (GRBAC) is a new + paradigm for creating and maintaining rich access control + policies. 
GRBAC leverages and extends the power of + traditional role based access control (RBAC) by + incorporating subject roles, object roles and environment + roles into access control decisions. Subject roles are like + traditional RBAC roles: they abstract the security-relevant + characteristics of subjects into categories that can be + used in defining a security policy. Similarly, object roles + abstract the various properties of objects, such as object + type (e.g., text, JPEG, executable) or sensitivity level + (e.g., classified, top secret) into categories. Environment + roles capture environmental information, such as time of + day or system load so it can be used to mediate access + control. Together, these three types of roles offer + flexibility and expressive power, as well as a degree of + usability not found in current access control models}, + tags = {noTAG}, + clearance = {unclassified}, + timestap = {2008-05-29} +} + +@InProceedings{ bell.ea:secure:1996, + author = {D. Elliott Bell and Leonard J. LaPadula}, + title = {Secure Computer Systems: A Mathematical Model, Volume + {II}}, + booktitle = {Journal of Computer Security 4}, + year = 1996, + pages = {229--263}, + note = {An electronic reconstruction of \emph{Secure Computer + Systems: Mathematical Foundations}, 1973} +} + +@InProceedings{ bell:looking:2005, + title = {Looking Back at the Bell-La Padula Model}, + author = {D. 
Elliott Bell}, + journal = proc + # { the 21st Annual Computer Security Applications + Conference}, + year = 2005, + isbn = {1063-9527}, + doi = {10.1109/CSAC.2005.37}, + publisher = {pub-ieee}, + address = pub-ieee:adr, + pages = {337--351} +} + +@Booklet{ oasis:xacml:2005, + title = {{eXtensible Access Control Markup Language (XACML)}, + Version 2.0}, + year = 2005, + url = {http://docs.oasis-open.org/xacml/2.0/XACML-2.0-OS-NORMATIVE.zip} + , + bibkey = {oasis:xacml:2005}, + publisher = {OASIS}, + key = {OASIS}, + language = {USenglish}, + public = {yes} +} + + +@InProceedings{ ferreira.ea:how:2009, + author = {Ana Ferreira and David Chadwick and Pedro Farinha and + Gansen Zhao and Rui Chilro and Ricardo Cruz-Correia and + Luis Antunes}, + title = {How to securely break into RBAC: the BTG-RBAC model}, + booktitle = {Annual Computer Security Applications Conference (ACSAC)}, + year = 2009, + abstract = {Access control models describe frameworks that dictate how + subjects (e.g. users) access resources. In the Role-Based + Access Control (RBAC) model access to resources is based on + the role the user holds within the organization. Although + flexible and easier to manage within large-scale + authorization frameworks, RBAC is usually a static model + where access control decisions have only two output + options: Grant or Deny. Break The Glass (BTG) policies can + be provided in order to break or override the access + controls within an access control policy but in a + controlled and justifiable manner. The main objective of + this paper is to integrate BTG within the NIST/ANSI RBAC + model in a transparent and secure way so that it can be + adopted generically in any domain where unanticipated or + emergency situations may occur. The new proposed model, + called BTG-RBAC, provides a third decision option BTG. 
This + allows break the glass policies to be implemented in any + application without any major changes to either the + application or the RBAC authorization infrastructure, apart + from the decision engine. Finally, in order to validate the + model, we discuss how the BTG-RBAC model is being + introduced within a Portuguese healthcare institution where + the legislation requires that genetic information must be + accessed by a restricted group of healthcare professionals. + These professionals, advised by the ethical committee, have + required and asked for the implementation of the BTG + concept in order to comply with the said legislation.} +} + +@Manual{ ansi:rbac:2004, + bibkey = {ansi:rbac:1998}, + abstract = {This standard describes RBAC features that have achieved + acceptance in the commercial marketplace. It includes a + reference model and functional specifications for the RBAC + features defined in the reference model. It is intended for + (1) software engineers and product development managers who + design products incorporating access control features; and + (2) managers and procurement officials who seek to acquire + computer security products with features that provide + access control capabilities in accordance with commonly + known and understood terminology and functional + specifications.}, + note = {ANSI INCITS 359-2004}, + title = {American National Standard for Information Technology -- + Role Based Access Control}, + organization = {ANSI}, + year = 2004, + month = feb, + publisher = {The American National Standards Institute}, + address = {New York} +} + +@Article{ li.ea:critique:2007, + author = {Ninghui Li and JiWon Byun and Elisa Bertino}, + journal = {Security Privacy, IEEE}, + title = {A Critique of the ANSI Standard on Role-Based Access + Control}, + year = 2007, + month = {nov.-dec. 
}, + volume = 5, + number = 6, + pages = {41--49}, + abstract = {In 2004, the American National Standards Institute + approved the Role-Based Access Control standard to fulfill + "a need among government and industry purchasers of + information technology products for a consistent and + uniform definition of role based access control (RBAC) + features". Such uniform definitions give IT product vendors + and customers a common and unambiguous terminology for RBAC + features, which can lead to wider adoption of RBAC and + increased productivity. However, the current ANSI RBAC + Standard has several limitations, design flaws, and + technical errors that, it unaddressed, could lead to + confusions among IT product vendors and customers and to + RBAC implementations with different semantics, thus + defeating the standard's purpose.}, + keywords = {ANSI standard;IT product vendors;role-based access + control;DP industry;authorisation;standards;}, + doi = {10.1109/MSP.2007.158}, + issn = {1540-7993} +} + +@Article{ ardagna.ea:access:2010, + title = {Access control for smarter healthcare using policy + spaces}, + journal = {Computers \& Security}, + year = 2010, + issn = {0167-4048}, + doi = {10.1016/j.cose.2010.07.001}, + author = {Claudio A. Ardagna and Sabrina De Capitani di Vimercati + and Sara Foresti and Tyrone W. Grandison and Sushil Jajodia + and Pierangela Samarati}, + keywords = {Access control, Break the glass, Policy spaces, + Exceptions, Healthcare systems}, + abstract = {A fundamental requirement for the healthcare industry is + that the delivery of care comes first and nothing should + interfere with it. As a consequence, the access control + mechanisms used in healthcare to regulate and restrict the + disclosure of data are often bypassed in case of + emergencies. 
This phenomenon, called "break the glass", is + a common pattern in healthcare organizations and, though + quite useful and mandatory in emergency situations, from a + security perspective, it represents a serious system + weakness. Malicious users, in fact, can abuse the system by + exploiting the break the glass principle to gain + unauthorized privileges and accesses. In this paper, we + propose an access control solution aimed at better + regulating break the glass exceptions that occur in + healthcare systems. Our solution is based on the definition + of different policy spaces, a language, and a composition + algebra to regulate access to patient data and to balance + the rigorous nature of traditional access control systems + with the "delivery of care comes first" principle.} +} + +@Article{ sandhu.ea:arbac97:1999, + author = {Ravi Sandhu and Venkata Bhamidipati and Qamar Munawer}, + title = {The ARBAC97 model for role-based administration of roles}, + journal = j-tissec, + volume = 2, + number = 1, + year = 1999, + issn = {1094-9224}, + pages = {105--135}, + doi = {10.1145/300830.300839}, + address = pub-acm:adr, + publisher = pub-acm, + abstract = { In role-based access control (RBAC), permissions are + associated with roles' and users are made members of roles, + thereby acquiring the roles; permissions. RBAC's motivation + is to simplify administration of authorizations. An + appealing possibility is to use RBAC itself to manage RBAC, + to further provide administrative convenience and + scalability, especially in decentralizing administrative + authority, responsibility, and chores. This paper describes + the motivation, intuition, and formal definition of a new + role-based model for RBAC administration. 
This model is + called ARBAC97 (administrative RBAC '97) and has three + components: URA97 (user-role assignment '97), RPA97 + (permission-role assignment '97), and RRA97 (role-role + assignment '97) dealing with different aspects of RBAC + administration. URA97, PRA97, and an outline of RRA97 were + defined in 1997, hence the designation given to the entire + model. RRA97 was completed in 1998. ARBAC97 is described + completely in this paper for the first time. We also + discusses possible extensions of ARBAC97. } +} + +@Article{ becker:information:2007, + title = {Information governance in NHS's NPfIT: A case for policy + specification}, + journal = {International Journal of Medical Informatics}, + volume = 76, + number = {5-6}, + pages = {432--437}, + year = 2007, + mynote = {"Virtual Biomedical Universities and E-Learning" and + "Secure eHealth: Managing Risk to Patient Data" - + E-Learning and Secure eHealth Double S.I.}, + issn = {1386-5056}, + doi = {10.1016/j.ijmedinf.2006.09.008}, + author = {Moritz Y. Becker}, + keywords = {Access control}, + abstract = {Purpose The National Health Service's (NHS's) National + Programme for Information Technology (NPfIT) in the UK with + its proposed nation-wide online health record service poses + serious technical challenges, especially with regard to + access control and patient confidentiality. The complexity + of the confidentiality requirements and their constantly + evolving nature (due to changes in law, guidelines and + ethical consensus) make traditional technologies such as + role-based access control (RBAC) unsuitable. Furthermore, a + more formal approach is also needed for debating about and + communicating on information governance, as + natural-language descriptions of security policies are + inherently ambiguous and incomplete. 
Our main goal is to + convince the reader of the strong benefits of employing + formal policy specification in nation-wide electronic + health record (EHR) projects.Approach Many difficulties + could be alleviated by specifying the requirements in a + formal authorisation policy language such as Cassandra. The + language is unambiguous, declarative and + machine-enforceable, and is based on distributed + constrained Datalog. Cassandra is interpreted within a + distributed Trust Management environment, where digital + credentials are used for establishing mutual trust between + strangers.Results To demonstrate how policy specification + can be applied to NPfIT, we translate a fragment of + natural-language NHS specification into formal Cassandra + rules. In particular, we present policy rules pertaining to + the management of Clinician Sealed Envelopes, the mechanism + by which clinical patient data can be concealed in the + nation-wide EHR service. Our case study exposes ambiguities + and incompletenesses in the informal NHS + documents.Conclusions We strongly recommend the use of + trust management and policy specification technology for + the implementation of nation-wide EHR infrastructures. + Formal policies can be used for automatically enforcing + confidentiality requirements, but also for specification + and communication purposes. Formalising the requirements + also reveals ambiguities and missing details in the + currently used informal specification documents.}, + publisher = pub-elsevier +} +@InCollection{ brucker.ea:extending:2009, + abstract = {Access control models are usually static, i.e., permissions are granted based on a policy that only changes seldom. 
Especially for scenarios in health care and disaster management, a more flexible support of access control, i.e., the underlying policy, is needed.\\\\Break-glass is one approach for such a flexible support of policies which helps to prevent system stagnation that could harm lives or otherwise result in losses. Today, break-glass techniques are usually added on top of standard access control solutions in an ad-hoc manner and, therefore, lack an integration into the underlying access control paradigm and the systems' access control enforcement architecture.\\\\We present an approach for integrating, in a fine-grained manner, break-glass strategies into standard access control models and their accompanying enforcement architecture. This integration provides means for specifying break-glass policies precisely and supporting model-driven development techniques based on such policies.}, + address = {New York, NY, USA}, + author = {Achim D. Brucker and Helmut Petritsch}, + booktitle = {ACM symposium on access control models and technologies (SACMAT)}, + copyright = {ACM}, + copyrighturl = {http://dl.acm.org/authorize?175073}, + doi = {10.1145/1542207.1542239}, + editor = {Barbara Carminati and James Joshi}, + isbn = {978-1-60558-537-6}, + keywords = {disaster management, access-control, break-glass, model-driven security}, + location = {Stresa, Italy}, + pages = {197--206}, + pdf = {http://www.brucker.ch/bibliography/download/2009/brucker.ea-extending-2009.pdf}, + publisher = {ACM Press}, + talk = {talk:brucker.ea:extending:2009}, + title = {Extending Access Control Models with Break-glass}, + url = {http://www.brucker.ch/bibliography/abstract/brucker.ea-extending-2009}, + year = {2009}, +} diff --git a/document/root.tex b/document/root.tex new file mode 100644 index 0000000..7f9def7 --- /dev/null +++ b/document/root.tex @@ -0,0 +1,154 @@ +\documentclass[11pt,DIV10,a4paper,twoside=semi,openright,titlepage]{scrreprt} +\usepackage{fixltx2e} 
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% Overrides the (rightfully issued) warning by Koma Script that \rm
+%%% etc. should not be used (they are deprecated since more than a
+%%% decade)
+ \DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm}
+ \DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf}
+ \DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt}
+ \DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf}
+ \DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit}
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\usepackage{isabelle,isabellesym}
+\usepackage{stmaryrd}
+\usepackage{paralist}
+\usepackage{xspace}
+\newcommand{\testgen}{HOL-TestGen\xspace}
+\newcommand{\testgenFW}{HOL-TestGen/FW\xspace}
+\usepackage[numbers, sort&compress, sectionbib]{natbib}
+\usepackage{graphicx}
+\usepackage{color}
+\sloppy
+
+\usepackage{amssymb}
+
+
+
+\newcommand{\isasymmodels}{\isamath{\models}}
+\newcommand{\HOL}{HOL}
+
+\newcommand{\ie}{i.\,e.}
+\newcommand{\eg}{e.\,g.}
+
+\usepackage{pdfsetup}
+
+\urlstyle{rm}
+\isabellestyle{it}
+\renewcommand{\isamarkupheader}[1]{\section{#1}}
+\renewcommand{\isamarkupsection}[1]{\subsection{#1}}
+\renewcommand{\isamarkupsubsection}[1]{\subsubsection{#1}}
+\renewcommand{\isamarkupsubsubsection}[1]{\paragraph{#1}}
+\renewcommand{\isamarkupsect}[1]{\subsection{#1}}
+\renewcommand{\isamarkupsubsect}[1]{\subsubsection{#1}}
+\renewcommand{\isamarkupsubsubsect}[1]{\paragraph{#1}}
+\renewcommand{\isastyle}{\isastyleminor}
+
+\pagestyle{empty}
+\begin{document}
+\renewcommand{\subsubsectionautorefname}{Section}
+\renewcommand{\subsectionautorefname}{Section}
+\renewcommand{\sectionautorefname}{Section}
+\renewcommand{\chapterautorefname}{Chapter}
+\newcommand{\subtableautorefname}{\tableautorefname}
+\newcommand{\subfigureautorefname}{\figureautorefname}
+
+\title{The Unified Policy Framework\\
+ (UPF)}
+\author{Achim D. 
Brucker\footnotemark[1] \quad
+ Lukas Br\"ugger\footnotemark[2] \quad
+ Burkhart Wolff\footnotemark[3]\\[1.5em]
+ \normalsize
+ \normalsize\footnotemark[1]~SAP SE, Vincenz-Priessnitz-Str. 1, 76131 Karlsruhe,
+ Germany \texorpdfstring{\\}{}
+ \normalsize\href{mailto:"Achim D. Brucker"
+ }{achim.brucker@sap.com}\\[1em]
+ %
+ \normalsize\footnotemark[2]Information Security, ETH Zurich, 8092 Zurich, Switzerland
+ \texorpdfstring{\\}{}
+ \normalsize\href{mailto:"Lukas Bruegger"
+ }{Lukas.A.Bruegger@gmail.com}\\[1em]
+ %
+ \normalsize\footnotemark[3]~Univ. Paris-Sud, Laboratoire LRI,
+ UMR8623, 91405 Orsay,
+ France\texorpdfstring{\\}{}
+ \normalsize\href{mailto:"Burkhart Wolff" }{burkhart.wolff@lri.fr}
+}
+
+\pagestyle{empty}
+\publishers{%
+ \normalfont\normalsize%
+ \centerline{\textsf{\textbf{\large Abstract}}}
+ \vspace{1ex}%
+ \parbox{0.8\linewidth}{%
+ We present the \emph{Unified Policy Framework}
+ (UPF), a generic framework for modelling security
+ (access-control) policies in Isabelle/\HOL.
+ %\cite{}.
+ UPF emphasizes the view that a policy is a policy decision
+ function that grants or denies access to resources, permissions,
+ etc. In other words, instead of modelling the
+ relations of permitted or prohibited requests directly, we model
+ the concrete function that implements the policy decision point
+ in a system, seen as an ``aspect'' or ``wrapper'' around the
+ business logic % Fachlogik
+ of a system.
+ In more detail, UPF is based on the following four principles:
+ \begin{inparaenum}
+ \item Functional representation of policies,
+ \item No conflicts are possible,
+ \item Three-valued decision type (allow, deny, undefined),
+ \item Output type containing not only the decision but also
+ a `slot' for arbitrary result data. 
+ \end{inparaenum} + } +} + +\maketitle +\cleardoublepage +\pagestyle{plain} +\tableofcontents +\cleardoublepage + +\input{introduction} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +% + % \input{session} + \chapter{The Unified Policy Framework (UPF)} + \input{UPFCore} + \input{ElementaryPolicies} + \input{SeqComposition} + \input{ParallelComposition} + \input{Analysis} + \input{Normalisation} + \input{NormalisationTestSpecification} + \input{UPF} + \chapter{Example} + \input{example-intro} + \input{Service} + \input{ServiceExample} +% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +\input{conclusion} + +\chapter{Appendix} +\input{Monads} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: "root" +%%% End: + + +\nocite{brucker.ea:formal-fw-testing:2014,brucker.ea:hol-testgen-fw:2013,brucker.ea:theorem-prover:2012,brucker.ea:model-based:2011} +\nocite{bruegger:generation:2012} +\bibliographystyle{abbrvnat} +\bibliography{root} + + +\end{document} + +%%% Local Variables: +%%% mode: latex +%%% TeX-master: t +%%% End: