Browse Source

Import of official AFP entry for Isabelle 2019.

tags/afp-UPF-current-Isabelle2019^0
Achim D. Brucker 1 year ago
parent
commit
ae982dface
14 changed files with 223 additions and 223 deletions
  1. +1
    -1
      .ci/Jenkinsfile
  2. +14
    -14
      UPF/Analysis.thy
  3. +13
    -13
      UPF/ElementaryPolicies.thy
  4. +42
    -42
      UPF/Monads.thy
  5. +35
    -35
      UPF/Normalisation.thy
  6. +7
    -7
      UPF/NormalisationTestSpecification.thy
  7. +16
    -16
      UPF/ParallelComposition.thy
  8. +1
    -1
      UPF/ROOT
  9. +11
    -11
      UPF/SeqComposition.thy
  10. +30
    -30
      UPF/Service.thy
  11. +8
    -8
      UPF/ServiceExample.thy
  12. +3
    -3
      UPF/UPF.thy
  13. +40
    -40
      UPF/UPFCore.thy
  14. +2
    -2
      UPF/document/root.bib

+ 1
- 1
.ci/Jenkinsfile View File

@@ -3,7 +3,7 @@ pipeline {
stages {
stage('Build') {
steps {
sh 'docker run -v $PWD/UPF:/UPF logicalhacking:isabelle2018 isabelle build -D /UPF'
sh 'docker run -v $PWD/UPF:/UPF logicalhacking:isabelle2019 isabelle build -D /UPF'
}
}
}


+ 14
- 14
UPF/Analysis.thy View File

@@ -42,7 +42,7 @@
******************************************************************************)


section{* Properties on Policies *}
section\<open>Properties on Policies\<close>
theory
Analysis
imports
@@ -50,19 +50,19 @@ theory
SeqComposition
begin

text {*
text \<open>
In this theory, several standard policy properties are paraphrased in UPF terms.
*}
\<close>

subsection{* Basic Properties *}
subsection\<open>Basic Properties\<close>

subsubsection{* A Policy Has no Gaps *}
subsubsection\<open>A Policy Has no Gaps\<close>

definition gap_free :: "('a \<mapsto> 'b) \<Rightarrow> bool"
where "gap_free p = (dom p = UNIV)"

subsubsection{* Comparing Policies *}
text {* Policy p is more defined than q: *}
subsubsection\<open>Comparing Policies\<close>
text \<open>Policy p is more defined than q:\<close>
definition more_defined :: "('a \<mapsto> 'b) \<Rightarrow>('a \<mapsto> 'b) \<Rightarrow>bool"
where "more_defined p q = (dom q \<subseteq> dom p)"

@@ -74,7 +74,7 @@ lemma strictly_more_vs_more: "strictly_more_defined p q \<Longrightarrow> more_d
unfolding more_defined_def strictly_more_defined_def
by auto

text{* Policy p is more permissive than q: *}
text\<open>Policy p is more permissive than q:\<close>
definition more_permissive :: "('a \<mapsto> 'b) \<Rightarrow> ('a \<mapsto> 'b) \<Rightarrow> bool" (infixl "\<sqsubseteq>\<^sub>A" 60)
where " p \<sqsubseteq>\<^sub>A q = (\<forall> x. (case q x of \<lfloor>allow y\<rfloor> \<Rightarrow> (\<exists> z. (p x = \<lfloor>allow z\<rfloor>))
| \<lfloor>deny y\<rfloor> \<Rightarrow> True
@@ -97,7 +97,7 @@ lemma more_permissive_trans : "p \<sqsubseteq>\<^sub>A p' \<Longrightarrow> p' \
by(erule_tac x = x in allE, simp)
done

text{* Policy p is more rejective than q: *}
text\<open>Policy p is more rejective than q:\<close>
definition more_rejective :: "('a \<mapsto> 'b) \<Rightarrow> ('a \<mapsto> 'b) \<Rightarrow> bool" (infixl "\<sqsubseteq>\<^sub>D" 60)
where " p \<sqsubseteq>\<^sub>D q = (\<forall> x. (case q x of \<lfloor>deny y\<rfloor> \<Rightarrow> (\<exists> z. (p x = \<lfloor>deny z\<rfloor>))
| \<lfloor>allow y\<rfloor> \<Rightarrow> True
@@ -130,7 +130,7 @@ lemma "A\<^sub>I \<sqsubseteq>\<^sub>A p"
unfolding more_permissive_def allow_all_fun_def allow_pfun_def allow_all_id_def
by(auto split: option.split decision.split)
subsection{* Combined Data-Policy Refinement *}
subsection\<open>Combined Data-Policy Refinement\<close>
definition policy_refinement ::
"('a \<mapsto> 'b) \<Rightarrow> ('a' \<Rightarrow> 'a) \<Rightarrow>('b' \<Rightarrow> 'b) \<Rightarrow> ('a' \<mapsto> 'b') \<Rightarrow> bool"
@@ -168,13 +168,13 @@ theorem polref_trans:
done
done

subsection {* Equivalence of Policies *}
subsubsection{* Equivalence over domain D *}
subsection \<open>Equivalence of Policies\<close>
subsubsection\<open>Equivalence over domain D\<close>
definition p_eq_dom :: "('a \<mapsto> 'b) \<Rightarrow> 'a set \<Rightarrow> ('a \<mapsto> 'b) \<Rightarrow>bool" ("_ \<approx>\<^bsub>_\<^esub> _" [60,60,60]60)
where "p \<approx>\<^bsub>D\<^esub> q = (\<forall>x\<in>D. p x = q x)"
text{* p and q have no conflicts *}
text\<open>p and q have no conflicts\<close>
definition no_conflicts :: "('a \<mapsto> 'b) \<Rightarrow>('a \<mapsto> 'b) \<Rightarrow>bool" where
"no_conflicts p q = (dom p = dom q \<and> (\<forall>x\<in>(dom p).
(case p x of \<lfloor>allow y\<rfloor> \<Rightarrow> (\<exists>z. q x = \<lfloor>allow z\<rfloor>)
@@ -195,7 +195,7 @@ lemma policy_eq:
apply (metis)+
done
subsubsection{* Miscellaneous *}
subsubsection\<open>Miscellaneous\<close>
lemma dom_inter: "\<lbrakk>dom p \<inter> dom q = {}; p x = \<lfloor>y\<rfloor>\<rbrakk> \<Longrightarrow> q x = \<bottom>"
by (auto)


+ 13
- 13
UPF/ElementaryPolicies.thy View File

@@ -41,20 +41,20 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section{* Elementary Policies *}
section\<open>Elementary Policies\<close>
theory
ElementaryPolicies
imports
UPFCore
begin
text{*
text\<open>
In this theory, we introduce the elementary policies of UPF that build the basis
for more complex policies. These complex policies, respectively, embedding of
well-known access control or security models, are build by composing the elementary
policies defined in this theory.
*}
\<close>

subsection{* The Core Policy Combinators: Allow and Deny Everything *}
subsection\<open>The Core Policy Combinators: Allow and Deny Everything\<close>

definition
deny_pfun :: "('\<alpha> \<rightharpoonup>'\<beta>) \<Rightarrow> ('\<alpha> \<mapsto> '\<beta>)" ("AllD")
@@ -113,7 +113,7 @@ lemma neq_Allow_Deny: "pf \<noteq> \<emptyset> \<Longrightarrow> (deny_pfun pf)
done
done

subsection{* Common Instances *}
subsection\<open>Common Instances\<close>

definition allow_all_fun :: "('\<alpha> \<Rightarrow> '\<beta>) \<Rightarrow> ('\<alpha> \<mapsto> '\<beta>)" ("A\<^sub>f")
where "allow_all_fun f = allow_pfun (Some o f)"
@@ -137,7 +137,7 @@ definition
deny_all :: "('\<alpha> \<mapsto> unit)" ("D\<^sub>U") where
"deny_all p = \<lfloor>deny ()\<rfloor>"

text{* ... and resulting properties: *}
text\<open>... and resulting properties:\<close>

lemma "A\<^sub>I \<Oplus> Map.empty = A\<^sub>I"
by simp
@@ -160,9 +160,9 @@ lemma deny_left_cancel :"dom pf = UNIV \<Longrightarrow> (deny_pfun pf) \<Oplus>
apply (rule ext)+
by (auto simp: deny_pfun_def option.splits)

subsection{* Domain, Range, and Restrictions *}
subsection\<open>Domain, Range, and Restrictions\<close>

text{*
text\<open>
Since policies are essentially maps, we inherit the basic definitions for
domain and range on Maps: \\
\verb+Map.dom_def+ : @{thm Map.dom_def} \\
@@ -186,11 +186,11 @@ text{*
\item\verb+Map.dom_if+ @{thm Map.dom_if}
\item\verb+Map.dom_map_add+ @{thm Map.dom_map_add}
\end{itemize}
*}
\<close>

text{*
text\<open>
However, some properties are specific to policy concepts:
*}
\<close>
lemma sub_ran : "ran p \<subseteq> Allow \<union> Deny"
apply (auto simp: Allow_def Deny_def ran_def full_SetCompr_eq[symmetric])[1]
subgoal for x a
@@ -261,7 +261,7 @@ lemma ran_deny_all: "ran(D\<^sub>f id) = Deny"
done


text{*
text\<open>
Reasoning over \verb+dom+ is most crucial since it paves the way for simplification and
reordering of policies composed by override (i.e. by the normal left-to-right rule composition
method.
@@ -275,7 +275,7 @@ text{*
\item \verb+Map.map_add_upd_left+ @{thm Map.map_add_upd_left}
\end{itemize}
The latter rule also applies to allow- and deny-override.
*}
\<close>

definition dom_restrict :: "['\<alpha> set, '\<alpha>\<mapsto>'\<beta>] \<Rightarrow> '\<alpha>\<mapsto>'\<beta>" (infixr "\<triangleleft>" 55)
where "S \<triangleleft> p \<equiv> (\<lambda>x. if x \<in> S then p x else \<bottom>)"


+ 42
- 42
UPF/Monads.thy View File

@@ -41,15 +41,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section {* Basic Monad Theory for Sequential Computations *}
section \<open>Basic Monad Theory for Sequential Computations\<close>
theory
Monads
imports
Main
begin

subsection{* General Framework for Monad-based Sequence-Test *}
text{*
subsection\<open>General Framework for Monad-based Sequence-Test\<close>
text\<open>
As such, Higher-order Logic as a purely functional specification formalism has no built-in
mechanism for state and state-transitions. Forms of testing involving state require therefore
explicit mechanisms for their treatment inside the logic; a well-known technique to model
@@ -67,9 +67,9 @@ text{*
\item non-deterministic i/o automata, and
\item labelled transition systems (LTS)
\end{enumerate}
*}
\<close>

subsubsection{* State Exception Monads *}
subsubsection\<open>State Exception Monads\<close>
type_synonym ('o, '\<sigma>) MON\<^sub>S\<^sub>E = "'\<sigma> \<rightharpoonup> ('o \<times> '\<sigma>)"
definition bind_SE :: "('o,'\<sigma>)MON\<^sub>S\<^sub>E \<Rightarrow> ('o \<Rightarrow> ('o','\<sigma>)MON\<^sub>S\<^sub>E) \<Rightarrow> ('o','\<sigma>)MON\<^sub>S\<^sub>E"
@@ -103,9 +103,9 @@ definition if_SE :: "['\<sigma> \<Rightarrow> bool, ('\<alpha>, '\<sigma>)MON\<^
where "if_SE c E F = (\<lambda>\<sigma>. if c \<sigma> then E \<sigma> else F \<sigma>)"
notation if_SE ("if\<^sub>S\<^sub>E")

text{*
text\<open>
The standard monad theorems about unit and associativity:
*}
\<close>

lemma bind_left_unit : "(x \<leftarrow> return a; k) = k"
apply (simp add: unit_SE_def bind_SE_def)
@@ -131,7 +131,7 @@ lemma bind_assoc: "(y \<leftarrow> (x \<leftarrow> m; k); h) = (x \<leftarrow> m
done
done

text{*
text\<open>
In order to express test-sequences also on the object-level and to make our theory amenable to
formal reasoning over test-sequences, we represent them as lists of input and generalize the
bind-operator of the state-exception monad accordingly. The approach is straightforward, but
@@ -147,9 +147,9 @@ text{*
of side-conditions have to be expressed inside \HOL. From the user perspective, this will not
make much difference, since junk-data resulting from too weak typing can be ruled out by adopted
front-ends.
*}
\<close>

text{*
text\<open>
In order to express test-sequences also on the object-level and to make our theory amenable to
formal reasoning over test-sequences, we represent them as lists of input and generalize the
bind-operator of the state-exception monad accordingly. Thus, the notion of test-sequence
@@ -168,15 +168,15 @@ text{*
same operation will occur; this form of side-conditions have to be expressed
inside \HOL. From the user perspective, this will not make much difference,
since junk-data resulting from too weak typing can be ruled out by adopted
front-ends. *}
front-ends.\<close>


text{* Note that the subsequent notion of a test-sequence allows the io stepping
text\<open>Note that the subsequent notion of a test-sequence allows the io stepping
function (and the special case of a program under test) to stop execution
\emph{within} the sequence; such premature terminations are characterized by an
output list which is shorter than the input list. Note that our primary
notion of multiple execution ignores failure and reports failure
steps only by missing results ... *}
steps only by missing results ...\<close>


fun mbind :: "'\<iota> list \<Rightarrow> ('\<iota> \<Rightarrow> ('o,'\<sigma>) MON\<^sub>S\<^sub>E) \<Rightarrow> ('o list,'\<sigma>) MON\<^sub>S\<^sub>E"
@@ -188,9 +188,9 @@ fun mbind :: "'\<iota> list \<Rightarrow> ('\<iota> \<Rightarrow> ('o,'\<si
None \<Rightarrow> Some([out],\<sigma>')
| Some(outs,\<sigma>'') \<Rightarrow> Some(out#outs,\<sigma>'')))"

text{* As mentioned, this definition is fail-safe; in case of an exception,
text\<open>As mentioned, this definition is fail-safe; in case of an exception,
the current state is maintained, no result is reported.
An alternative is the fail-strict variant @{text "mbind'"} defined below. *}
An alternative is the fail-strict variant \<open>mbind'\<close> defined below.\<close>

lemma mbind_unit [simp]: "mbind [] f = (return [])"
by(rule ext, simp add: unit_SE_def)
@@ -214,7 +214,7 @@ lemma mbind_nofailure [simp]: "mbind S f \<sigma> \<noteq> None"
done
done

text{* The fail-strict version of @{text mbind'} looks as follows: *}
text\<open>The fail-strict version of \<open>mbind'\<close> looks as follows:\<close>
fun mbind' :: "'\<iota> list \<Rightarrow> ('\<iota> \<Rightarrow> ('o,'\<sigma>) MON\<^sub>S\<^sub>E) \<Rightarrow> ('o list,'\<sigma>) MON\<^sub>S\<^sub>E"
where "mbind' [] iostep \<sigma> = Some([], \<sigma>)" |
"mbind' (a#H) iostep \<sigma> =
@@ -224,21 +224,21 @@ where "mbind' [] iostep \<sigma> = Some([], \<sigma>)" |
None \<Rightarrow> None \<comment> \<open>fail-strict\<close>
| Some(outs,\<sigma>'') \<Rightarrow> Some(out#outs,\<sigma>'')))"

text{*
text\<open>
mbind' as failure strict operator can be seen as a foldr on bind---if the types would
match \ldots
*}
\<close>

definition try_SE :: "('o,'\<sigma>) MON\<^sub>S\<^sub>E \<Rightarrow> ('o option,'\<sigma>) MON\<^sub>S\<^sub>E"
where "try_SE ioprog = (\<lambda>\<sigma>. case ioprog \<sigma> of
None \<Rightarrow> Some(None, \<sigma>)
| Some(outs, \<sigma>') \<Rightarrow> Some(Some outs, \<sigma>'))"
text{* In contrast @{term mbind} as a failure safe operator can roughly be seen
text\<open>In contrast @{term mbind} as a failure safe operator can roughly be seen
as a @{term foldr} on bind - try:
@{text "m1 ; try m2 ; try m3; ..."}. Note, that the rough equivalence only holds for
\<open>m1 ; try m2 ; try m3; ...\<close>. Note, that the rough equivalence only holds for
certain predicates in the sequence - length equivalence modulo None,
for example. However, if a conditional is added, the equivalence
can be made precise: *}
can be made precise:\<close>


lemma mbind_try:
@@ -261,8 +261,8 @@ lemma mbind_try:
done
done

text{* On this basis, a symbolic evaluation scheme can be established
that reduces @{term mbind}-code to @{term try_SE}-code and If-cascades. *}
text\<open>On this basis, a symbolic evaluation scheme can be established
that reduces @{term mbind}-code to @{term try_SE}-code and If-cascades.\<close>


definition alt_SE :: "[('o, '\<sigma>)MON\<^sub>S\<^sub>E, ('o, '\<sigma>)MON\<^sub>S\<^sub>E] \<Rightarrow> ('o, '\<sigma>)MON\<^sub>S\<^sub>E" (infixl "\<sqinter>\<^sub>S\<^sub>E" 10)
@@ -279,13 +279,13 @@ lemma malt_SE_mt [simp]: "\<Sqinter>\<^sub>S\<^sub>E [] = fail\<^sub>S\<^sub>E"
lemma malt_SE_cons [simp]: "\<Sqinter>\<^sub>S\<^sub>E (a # S) = (a \<sqinter>\<^sub>S\<^sub>E (\<Sqinter>\<^sub>S\<^sub>E S))"
by(simp add: malt_SE_def)

subsubsection{* State-Backtrack Monads *}
text{*This subsection is still rudimentary and as such an interesting
subsubsection\<open>State-Backtrack Monads\<close>
text\<open>This subsection is still rudimentary and as such an interesting
formal analogue to the previous monad definitions. It is doubtful that it is
interesting for testing and as a computational structure at all.
Clearly more relevant is ``sequence'' instead of ``set,'' which would
rephrase Isabelle's internal tactic concept.
*}
\<close>


type_synonym ('o, '\<sigma>) MON\<^sub>S\<^sub>B = "'\<sigma> \<Rightarrow> ('o \<times> '\<sigma>) set"
@@ -318,13 +318,13 @@ lemma bind_assoc_SB: "(y := (x := m; k); h) = (x := m; (y := k; h))"
apply (simp add: unit_SB_def bind_SB_def split_def)
done

subsubsection{* State Backtrack Exception Monad *}
text{*
subsubsection\<open>State Backtrack Exception Monad\<close>
text\<open>
The following combination of the previous two Monad-Constructions allows for the semantic
foundation of a simple generic assertion language in the style of Schirmer's Simpl-Language or
Rustan Leino's Boogie-PL language. The key is to use the exceptional element None for violations
of the assert-statement.
*}
\<close>
type_synonym ('o, '\<sigma>) MON\<^sub>S\<^sub>B\<^sub>E = "'\<sigma> \<Rightarrow> (('o \<times> '\<sigma>) set) option"
definition bind_SBE :: "('o,'\<sigma>)MON\<^sub>S\<^sub>B\<^sub>E \<Rightarrow> ('o \<Rightarrow> ('o','\<sigma>)MON\<^sub>S\<^sub>B\<^sub>E) \<Rightarrow> ('o','\<sigma>)MON\<^sub>S\<^sub>B\<^sub>E"
@@ -412,20 +412,20 @@ qed

subsection{* Valid Test Sequences in the State Exception Monad *}
text{*
subsection\<open>Valid Test Sequences in the State Exception Monad\<close>
text\<open>
This is still an unstructured merge of executable monad concepts and specification oriented
high-level properties initiating test procedures.
*}
\<close>

definition valid_SE :: "'\<sigma> \<Rightarrow> (bool,'\<sigma>) MON\<^sub>S\<^sub>E \<Rightarrow> bool" (infix "\<Turnstile>" 15)
where "(\<sigma> \<Turnstile> m) = (m \<sigma> \<noteq> None \<and> fst(the (m \<sigma>)))"
text{*
text\<open>
This notation consideres failures as valid---a definition inspired by I/O conformance.
Note that it is not possible to define this concept once and for all in a Hindley-Milner
type-system. For the moment, we present it only for the state-exception monad, although for
the same definition, this notion is applicable to other monads as well.
*}
\<close>

lemma syntax_test :
"\<sigma> \<Turnstile> (os \<leftarrow> (mbind \<iota>s ioprog); return(length \<iota>s = length os))"
@@ -435,7 +435,7 @@ oops
lemma valid_true[simp]: "(\<sigma> \<Turnstile> (s \<leftarrow> return x ; return (P s))) = P x"
by(simp add: valid_SE_def unit_SE_def bind_SE_def)

text{* Recall mbind\_unit for the base case. *}
text\<open>Recall mbind\_unit for the base case.\<close>

lemma valid_failure: "ioprog a \<sigma> = None \<Longrightarrow>
(\<sigma> \<Turnstile> (s \<leftarrow> mbind (a#S) ioprog ; M s)) =
@@ -549,12 +549,12 @@ lemma assume_D : "(\<sigma> \<Turnstile> (x \<leftarrow> assume\<^sub>S\<^sub>E
apply (simp)
done

text{*
text\<open>
These two rule prove that the SE Monad in connection with the notion of valid sequence is
actually sufficient for a representation of a Boogie-like language. The SBE monad with explicit
sets of states---to be shown below---is strictly speaking not necessary (and will therefore
be discontinued in the development).
*}
\<close>
lemma if_SE_D1 : "P \<sigma> \<Longrightarrow> (\<sigma> \<Turnstile> if\<^sub>S\<^sub>E P B\<^sub>1 B\<^sub>2) = (\<sigma> \<Turnstile> B\<^sub>1)"
by(auto simp: if_SE_def valid_SE_def)
@@ -576,17 +576,17 @@ lemma [code]: "(\<sigma> \<Turnstile> m) = (case (m \<sigma>) of None \<Rightar
apply (auto)
done
subsection{* Valid Test Sequences in the State Exception Backtrack Monad *}
text{*
subsection\<open>Valid Test Sequences in the State Exception Backtrack Monad\<close>
text\<open>
This is still an unstructured merge of executable monad concepts and specification oriented
high-level properties initiating test procedures.
*}
\<close>
definition valid_SBE :: "'\<sigma> \<Rightarrow> ('a,'\<sigma>) MON\<^sub>S\<^sub>B\<^sub>E \<Rightarrow> bool" (infix "\<Turnstile>\<^sub>S\<^sub>B\<^sub>E" 15)
where "\<sigma> \<Turnstile>\<^sub>S\<^sub>B\<^sub>E m \<equiv> (m \<sigma> \<noteq> None)"
text{*
text\<open>
This notation considers all non-failures as valid.
*}
\<close>
lemma assume_assert: "(\<sigma> \<Turnstile>\<^sub>S\<^sub>B\<^sub>E ( _ :\<equiv> assume\<^sub>S\<^sub>B\<^sub>E P ; assert\<^sub>S\<^sub>B\<^sub>E Q)) = (P \<sigma> \<longrightarrow> Q \<sigma>)"
by(simp add: valid_SBE_def assume_SBE_def assert_SBE_def bind_SBE_def)


+ 35
- 35
UPF/Normalisation.thy View File

@@ -40,7 +40,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section{* Policy Transformations *}
section\<open>Policy Transformations\<close>
theory
Normalisation
imports
@@ -48,75 +48,75 @@ theory
ParallelComposition
begin
text{*
text\<open>
This theory provides the formalisations required for the transformation of UPF policies.
A typical usage scenario can be observed in the firewall case
study~\cite{brucker.ea:formal-fw-testing:2014}.
*}
\<close>
subsection{* Elementary Operators *}
text{*
subsection\<open>Elementary Operators\<close>
text\<open>
We start by providing several operators and theorems useful when reasoning about a list of
rules which should eventually be interpreted as combined using the standard override operator.
*}
\<close>
text{*
text\<open>
The following definition takes as argument a list of rules and returns a policy where the
rules are combined using the standard override operator.
*}
\<close>
definition list2policy::"('a \<mapsto> 'b) list \<Rightarrow> ('a \<mapsto> 'b)" where
"list2policy l = foldr (\<lambda> x y. (x \<Oplus> y)) l \<emptyset>"
text{*
text\<open>
Determine the position of element of a list.
*}
\<close>
fun position :: "'\<alpha> \<Rightarrow> '\<alpha> list \<Rightarrow> nat" where
"position a [] = 0"
|"(position a (x#xs)) = (if a = x then 1 else (Suc (position a xs)))"
text{*
text\<open>
Provides the first applied rule of a policy given as a list of rules.
*}
\<close>
fun applied_rule where
"applied_rule C a (x#xs) = (if a \<in> dom (C x) then (Some x)
else (applied_rule C a xs))"
|"applied_rule C a [] = None"
text {*
text \<open>
The following is used if the list is constructed backwards.
*}
\<close>
definition applied_rule_rev where
"applied_rule_rev C a x = applied_rule C a (rev x)"
text{*
text\<open>
The following is a typical policy transformation. It can be applied to any type of policy and
removes all the rules from a policy with an empty domain. It takes two arguments: a semantic
interpretation function and a list of rules.
*}
\<close>
fun rm_MT_rules where
"rm_MT_rules C (x#xs) = (if dom (C x)= {}
then rm_MT_rules C xs
else x#(rm_MT_rules C xs))"
|"rm_MT_rules C [] = []"
text {*
text \<open>
The following invariant establishes that there are no rules with an empty domain in a list
of rules.
*}
\<close>
fun none_MT_rules where
"none_MT_rules C (x#xs) = (dom (C x) \<noteq> {} \<and> (none_MT_rules C xs))"
|"none_MT_rules C [] = True"
text{*
text\<open>
The following related invariant establishes that the policy has not a completely empty domain.
*}
\<close>
fun not_MT where
"not_MT C (x#xs) = (if (dom (C x) = {}) then (not_MT C xs) else True)"
|"not_MT C [] = False"
text{*
text\<open>
Next, a few theorems about the two invariants and the transformation:
*}
\<close>
lemma none_MT_rules_vs_notMT: "none_MT_rules C p \<Longrightarrow> p \<noteq> [] \<Longrightarrow> not_MT C p"
apply (induct p)
apply (simp_all)
@@ -174,7 +174,7 @@ lemma NMPrm: "not_MT C p \<Longrightarrow> not_MT C (rm_MT_rules C p)"
apply (simp_all)
done
text{* Next, a few theorems about applied\_rule: *}
text\<open>Next, a few theorems about applied\_rule:\<close>
lemma mrconc: "applied_rule_rev C x p = Some a \<Longrightarrow> applied_rule_rev C x (b#p) = Some a"
proof (induct p rule: rev_induct)
case Nil show ?case using Nil
@@ -236,8 +236,8 @@ next
qed

subsection{* Distributivity of the Transformation. *}
text{*
subsection\<open>Distributivity of the Transformation.\<close>
text\<open>
The scenario is the following (can be applied iteratively):
\begin{itemize}
\item Two policies are combined using one of the parallel combinators
@@ -246,12 +246,12 @@ text{*
\item policies that are semantically equivalent to the original policy if
\item combined from left to right using the override operator.
\end{itemize}
*}
\<close>

text{*
text\<open>
The following function is crucial for the distribution. Its arguments are a policy, a list
of policies, a parallel combinator, and a range and a domain coercion function.
*}
\<close>
fun prod_list :: "('\<alpha> \<mapsto>'\<beta>) \<Rightarrow> (('\<gamma> \<mapsto>'\<delta>) list) \<Rightarrow>
(('\<alpha> \<mapsto>'\<beta>) \<Rightarrow> ('\<gamma> \<mapsto>'\<delta>) \<Rightarrow> (('\<alpha> \<times> '\<gamma>) \<mapsto> ('\<beta> \<times> '\<delta>))) \<Rightarrow>
(('\<beta> \<times> '\<delta>) \<Rightarrow> 'y) \<Rightarrow> ('x \<Rightarrow> ('\<alpha> \<times> '\<gamma>)) \<Rightarrow>
@@ -260,9 +260,9 @@ fun prod_list :: "('\<alpha> \<mapsto>'\<beta>) \<Rightarrow> (('\<gamma> \<maps
((ran_adapt o_f ((par_comb x y) o dom_adapt))#(prod_list x ys par_comb ran_adapt dom_adapt))"
| "prod_list x [] par_comb ran_adapt dom_adapt = []"
text{*
text\<open>
An instance, as usual there are four of them.
*}
\<close>
definition prod_2_list :: "[('\<alpha> \<mapsto>'\<beta>), (('\<gamma> \<mapsto>'\<delta>) list)] \<Rightarrow>
(('\<beta> \<times> '\<delta>) \<Rightarrow> 'y) \<Rightarrow> ('x \<Rightarrow> ('\<alpha> \<times> '\<gamma>)) \<Rightarrow>
@@ -277,10 +277,10 @@ lemma list2listNMT: "x \<noteq> [] \<Longrightarrow> map sem x \<noteq> []"
lemma two_conc: "(prod_list x (y#ys) p r d) = ((r o_f ((p x y) o d))#(prod_list x ys p r d))"
by simp

text{*
text\<open>
The following two invariants establish if the law of distributivity holds for a combinator
and if an operator is strict regarding undefinedness.
*}
\<close>
definition is_distr where
"is_distr p = (\<lambda> g f. (\<forall> N P1 P2. ((g o_f ((p N (P1 \<Oplus> P2)) o f)) =
((g o_f ((p N P1) o f)) \<Oplus> (g o_f ((p N P2) o f))))))"
@@ -320,9 +320,9 @@ lemma notDom: "x \<in> dom A \<Longrightarrow> \<not> A x = None"
apply auto
done
text{*
text\<open>
The following theorems are crucial: they establish the correctness of the distribution.
*}
\<close>
lemma Norm_Distr_1: "((r o_f (((\<Otimes>\<^sub>1) P1 (list2policy P2)) o d)) x =
((list2policy ((P1 \<Otimes>\<^sub>L P2) (\<Otimes>\<^sub>1) r d)) x))"
proof (induct P2)
@@ -395,7 +395,7 @@ next
qed
qed
text {* Some domain reasoning *}
text \<open>Some domain reasoning\<close>
lemma domSubsetDistr1: "dom A = UNIV \<Longrightarrow> dom ((\<lambda>(x, y). x) o_f (A \<Otimes>\<^sub>1 B) o (\<lambda> x. (x,x))) = dom B"
apply (rule set_eqI)
apply (rule iffI)


+ 7
- 7
UPF/NormalisationTestSpecification.thy View File

@@ -40,14 +40,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section {* Policy Transformation for Testing *}
section \<open>Policy Transformation for Testing\<close>
theory
NormalisationTestSpecification
imports
Normalisation
begin

text{*
text\<open>
This theory provides functions and theorems which are useful if one wants to test policy
which are transformed. Most exist in two versions: one where the domains of the rules
of the list (which is the result of a transformation) are pairwise disjoint, and one where
@@ -55,11 +55,11 @@ text{*

The examples in the firewall case study provide a good documentation how these theories can
be applied.
*}
\<close>

text{*
text\<open>
This invariant establishes that the domains of a list of rules are pairwise disjoint.
*}
\<close>
fun disjDom where
"disjDom (x#xs) = ((\<forall>y\<in>(set xs). dom x \<inter> dom y = {}) \<and> disjDom xs)"
|"disjDom [] = True"
@@ -110,11 +110,11 @@ lemma distrPUTL:
apply (auto)
done

text{*
text\<open>
It makes sense to cater for the common special case where the normalisation returns a list
where the last element is a default-catch-all rule. It seems easier to cater for this globally,
rather than to require the normalisation procedures to do this.
*}
\<close>

fun gatherDomain_aux where
"gatherDomain_aux (x#xs) = (dom x \<union> (gatherDomain_aux xs))"


+ 16
- 16
UPF/ParallelComposition.thy View File

@@ -40,14 +40,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section{* Parallel Composition*}
section\<open>Parallel Composition\<close>
theory
ParallelComposition
imports
ElementaryPolicies
begin

text{*
text\<open>
The following combinators are based on the idea that two policies are executed in parallel.
Since both input and the output can differ, we chose to pair them.
@@ -60,13 +60,13 @@ text{*
In any case, although we have strictly speaking a pairing of decisions and not a nesting of
them, we will apply the same notational conventions as for the latter, i.e. as for
flattening.
*}
\<close>

subsection{* Parallel Combinators: Foundations *}
text {*
subsection\<open>Parallel Combinators: Foundations\<close>
text \<open>
There are four possible semantics how the decision can be combined, thus there are four
parallel composition operators. For each of them, we prove several properties.
*}
\<close>

definition prod_orA ::"['\<alpha>\<mapsto>'\<beta>, '\<gamma> \<mapsto>'\<delta>] \<Rightarrow> ('\<alpha>\<times>'\<gamma> \<mapsto> '\<beta>\<times>'\<delta>)" (infixr "\<Otimes>\<^sub>\<or>\<^sub>A" 55)
where "p1 \<Otimes>\<^sub>\<or>\<^sub>A p2 =
@@ -132,9 +132,9 @@ lemma prod_orD_quasi_commute: "p2 \<Otimes>\<^sub>\<or>\<^sub>D p1 = (((\<lambda
apply (simp split: option.splits decision.splits)
done

text{*
text\<open>
The following two combinators are by definition non-commutative, but still strict.
*}
\<close>
definition prod_1 :: "['\<alpha>\<mapsto>'\<beta>, '\<gamma> \<mapsto>'\<delta>] \<Rightarrow> ('\<alpha>\<times>'\<gamma> \<mapsto> '\<beta>\<times>'\<delta>)" (infixr "\<Otimes>\<^sub>1" 55)
where "p1 \<Otimes>\<^sub>1 p2 \<equiv>
@@ -212,11 +212,11 @@ lemma mt_prod_2_id[simp]:"\<emptyset> \<Otimes>\<^sub>2\<^sub>I p = \<emptyset>"
apply (simp add: prod_2_id_def prod_2_def)
done
subsection{* Combinators for Transition Policies *}
text {*
subsection\<open>Combinators for Transition Policies\<close>
text \<open>
For constructing transition policies, two additional combinators are required: one combines
state transitions by pairing the states, the other works equivalently on general maps.
*}
\<close>
definition parallel_map :: "('\<alpha> \<rightharpoonup> '\<beta>) \<Rightarrow> ('\<delta> \<rightharpoonup> '\<gamma>) \<Rightarrow>
('\<alpha> \<times> '\<delta> \<rightharpoonup> '\<beta> \<times> '\<gamma>)" (infixr "\<Otimes>\<^sub>M" 60)
@@ -231,11 +231,11 @@ where
"p1 \<Otimes>\<^sub>S p2 = (p1 \<Otimes>\<^sub>M p2) o (\<lambda> (a,b,c). ((a,b),a,c))"


subsection{* Range Splitting *}
text{*
subsection\<open>Range Splitting\<close>
text\<open>
The following combinator is a special case of both a parallel composition operator and a
range splitting operator. Its primary use case is when combining a policy with state transitions.
*}
\<close>

definition comp_ran_split :: "[('\<alpha> \<rightharpoonup> '\<gamma>) \<times> ('\<alpha> \<rightharpoonup>'\<gamma>), 'd \<mapsto> '\<beta>] \<Rightarrow> ('d \<times> '\<alpha>) \<mapsto> ('\<beta> \<times> '\<gamma>)"
(infixr "\<Otimes>\<^sub>\<nabla>" 100)
@@ -244,7 +244,7 @@ where "P \<Otimes>\<^sub>\<nabla> p \<equiv> \<lambda>x. case p (fst x) of
| \<lfloor>deny y\<rfloor> \<Rightarrow> (case ((snd P) (snd x)) of \<bottom> \<Rightarrow> \<bottom> | \<lfloor>z\<rfloor> \<Rightarrow> \<lfloor>deny (y,z)\<rfloor>)
| \<bottom> \<Rightarrow> \<bottom>"

text{* An alternative characterisation of the operator is as follows: *}
text\<open>An alternative characterisation of the operator is as follows:\<close>
lemma comp_ran_split_charn:
"(f, g) \<Otimes>\<^sub>\<nabla> p = (
(((p \<triangleright> Allow)\<Otimes>\<^sub>\<or>\<^sub>A (A\<^sub>p f)) \<Oplus>
@@ -257,7 +257,7 @@ lemma comp_ran_split_charn:
apply (auto)
done

subsection {* Distributivity of the parallel combinators *}
subsection \<open>Distributivity of the parallel combinators\<close>
lemma distr_or1_a: "(F = F1 \<Oplus> F2) \<Longrightarrow> (((N \<Otimes>\<^sub>1 F) o f) =
(((N \<Otimes>\<^sub>1 F1) o f) \<Oplus> ((N \<Otimes>\<^sub>1 F2) o f))) "


+ 1
- 1
UPF/ROOT View File

@@ -1,7 +1,7 @@
chapter AFP

session "UPF-devel" (AFP) = HOL +
description {* The Unified Policy Framework (UPF) *}
description "The Unified Policy Framework (UPF) "
options [timeout = 300]
theories
Monads


+ 11
- 11
UPF/SeqComposition.thy View File

@@ -40,23 +40,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section{* Sequential Composition *}
section\<open>Sequential Composition\<close>
theory
SeqComposition
imports
ElementaryPolicies
begin

text{*
text\<open>
Sequential composition is based on the idea that two policies are to be combined by applying
the second policy to the output of the first one. Again, there are four possibilities how the
decisions can be combined. *}
decisions can be combined.\<close>

subsection {* Flattening *}
text{*
subsection \<open>Flattening\<close>
text\<open>
A key concept of sequential policy composition is the flattening of nested decisions. There are
four possibilities, and these possibilities will give the various flavours of policy composition.
*}
\<close>
fun flat_orA :: "('\<alpha> decision) decision \<Rightarrow> ('\<alpha> decision)"
where "flat_orA(allow(allow y)) = allow y"
|"flat_orA(allow(deny y)) = allow y"
@@ -149,10 +149,10 @@ lemma flat_2_deny[dest]: "flat_2 x = deny y \<Longrightarrow> x = deny(deny y)
apply (case_tac "\<alpha>", simp_all)[1]
done

subsection{* Policy Composition *}
text{*
subsection\<open>Policy Composition\<close>
text\<open>
The following definition allows to compose two policies. Denies and allows are transferred.
*}
\<close>

fun lift :: "('\<alpha> \<mapsto> '\<beta>) \<Rightarrow> ('\<alpha> decision \<mapsto>'\<beta> decision)"
where "lift f (deny s) = (case f s of
@@ -170,10 +170,10 @@ lemma lift_mt [simp]: "lift \<emptyset> = \<emptyset>"
done
done

text{*
text\<open>
Since policies are maps, we inherit a composition on them. However, this results in nestings
of decisions---which must be flattened. As we know that there are four different forms of
flattening, we have four different forms of policy composition: *}
flattening, we have four different forms of policy composition:\<close>
definition
comp_orA :: "['\<beta>\<mapsto>'\<gamma>, '\<alpha>\<mapsto>'\<beta>] \<Rightarrow> '\<alpha>\<mapsto>'\<gamma>" (infixl "o'_orA" 55) where
"p2 o_orA p1 \<equiv> (map_option flat_orA) o (lift p2 \<circ>\<^sub>m p1)"


+ 30
- 30
UPF/Service.thy View File

@@ -40,52 +40,52 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section {* Secure Service Specification *}
section \<open>Secure Service Specification\<close>
theory
Service
imports
UPF
begin
text {*
text \<open>
In this section, we model a simple Web service and its access control model
that allows the staff in a hospital to access health care records of patients.
*}
\<close>

subsection{* Datatypes for Modelling Users and Roles*}
subsubsection {* Users *}
text{*
subsection\<open>Datatypes for Modelling Users and Roles\<close>
subsubsection \<open>Users\<close>
text\<open>
First, we introduce a type for users that we use to model that each
staff member has a unique id:
*}
\<close>
type_synonym user = int (* Each NHS employee has a unique NHS_ID. *)

text {*
text \<open>
Similarly, each patient has a unique id:
*}
\<close>
type_synonym patient = int (* Each patient gets a unique id *)

subsubsection {* Roles and Relationships*}
text{* In our example, we assume three different roles for members of the clinical staff: *}
subsubsection \<open>Roles and Relationships\<close>
text\<open>In our example, we assume three different roles for members of the clinical staff:\<close>

datatype role = ClinicalPractitioner | Nurse | Clerical

text{*
text\<open>
We model treatment relationships (legitimate relationships) between staff and patients
(respectively, their health records). This access control model is inspired by our detailed
NHS model.
*}
\<close>
type_synonym lr_id = int
type_synonym LR = "lr_id \<rightharpoonup> (user set)"
text{* The security context stores all the existing LRs. *}
text\<open>The security context stores all the existing LRs.\<close>
type_synonym \<Sigma> = "patient \<rightharpoonup> LR"
text{* The user context stores the roles the users are in. *}
text\<open>The user context stores the roles the users are in.\<close>
type_synonym \<upsilon> = "user \<rightharpoonup> role"
subsection {* Modelling Health Records and the Web Service API*}
subsubsection {* Health Records *}
text {* The content and the status of the entries of a health record *}
subsection \<open>Modelling Health Records and the Web Service API\<close>
subsubsection \<open>Health Records\<close>
text \<open>The content and the status of the entries of a health record\<close>
datatype data = dummyContent
datatype status = Open | Closed
type_synonym entry_id = int
@@ -93,8 +93,8 @@ type_synonym entry = "status \<times> user \<times> data"
type_synonym SCR = "(entry_id \<rightharpoonup> entry)"
type_synonym DB = "patient \<rightharpoonup> SCR"

subsubsection {* The Web Service API *}
text{* The operations provided by the service: *}
subsubsection \<open>The Web Service API\<close>
text\<open>The operations provided by the service:\<close>
datatype Operation = createSCR user role patient
| appendEntry user role patient entry_id entry
| deleteEntry user role patient entry_id
@@ -207,17 +207,17 @@ fun allContentStatic where
|"allContentStatic [] = True"


subsection{* Modelling Access Control*}
text {*
subsection\<open>Modelling Access Control\<close>
text \<open>
In the following, we define a rather complex access control model for our
scenario that extends traditional role-based access control
(RBAC)~\cite{sandhu.ea:role-based:1996} with treatment relationships and sealed
envelopes. Sealed envelopes (see~\cite{bruegger:generation:2012} for details)
are a variant of break-the-glass access control (see~\cite{brucker.ea:extending:2009}
for a general motivation and explanation of break-the-glass access control).
*}
\<close>

subsubsection {* Sealed Envelopes *}
subsubsection \<open>Sealed Envelopes\<close>

type_synonym SEPolicy = "(Operation \<times> DB \<mapsto> unit) "

@@ -259,7 +259,7 @@ definition SEPolicy :: SEPolicy where
lemmas SEsimps = SEPolicy_def get_entry_def userHasAccess_def
editEntrySE_def deleteEntrySE_def readEntrySE_def

subsubsection {* Legitimate Relationships *}
subsubsection \<open>Legitimate Relationships\<close>

type_synonym LRPolicy = "(Operation \<times> \<Sigma>, unit) policy"

@@ -365,7 +365,7 @@ definition FunPolicy where
removeLRFunPolicy \<Oplus> readSCRFunPolicy \<Oplus>
addLRFunPolicy \<Oplus> createFunPolicy \<Oplus> A\<^sub>U"

subsubsection{* Modelling Core RBAC *}
subsubsection\<open>Modelling Core RBAC\<close>

type_synonym RBACPolicy = "Operation \<times> \<upsilon> \<mapsto> unit"

@@ -389,9 +389,9 @@ definition RBACPolicy :: RBACPolicy where
then \<lfloor>allow ()\<rfloor>
else \<lfloor>deny ()\<rfloor>)"

subsection {* The State Transitions and Output Function*}
subsection \<open>The State Transitions and Output Function\<close>

subsubsection{* State Transition *}
subsubsection\<open>State Transition\<close>

fun OpSuccessDB :: "(Operation \<times> DB) \<rightharpoonup> DB" where
"OpSuccessDB (createSCR u r p,S) = (case S p of \<bottom> \<Rightarrow> \<lfloor>S(p\<mapsto>\<emptyset>)\<rfloor>
@@ -434,7 +434,7 @@ fun OpSuccessSigma :: "(Operation \<times> \<Sigma>) \<rightharpoonup> \<Sigma>"
fun OpSuccessUC :: "(Operation \<times> \<upsilon>) \<rightharpoonup> \<upsilon>" where
"OpSuccessUC (f,u) = \<lfloor>u\<rfloor>"

subsubsection {* Output *}
subsubsection \<open>Output\<close>

type_synonym Output = unit

@@ -445,7 +445,7 @@ fun OpSuccessOutput :: "(Operation) \<rightharpoonup> Output" where
fun OpFailOutput :: "Operation \<rightharpoonup> Output" where
"OpFailOutput x = \<lfloor>()\<rfloor>"

subsection {* Combine All Parts *}
subsection \<open>Combine All Parts\<close>

definition SE_LR_Policy :: "(Operation \<times> DB \<times> \<Sigma>, unit) policy" where
"SE_LR_Policy = (\<lambda>(x,x). x) o\<^sub>f (SEPolicy \<Otimes>\<^sub>\<or>\<^sub>D LR_Policy) o (\<lambda>(a,b,c). ((a,b),a,c))"


+ 8
- 8
UPF/ServiceExample.thy View File

@@ -39,19 +39,19 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section {* Instantiating Our Secure Service Example *}
section \<open>Instantiating Our Secure Service Example\<close>
theory
ServiceExample
imports
Service
begin
text {*
text \<open>
In the following, we briefly present an instantiations of our secure service example
from the last section. We assume three different members of the health care staff and
two patients:
*}
\<close>

subsection {* Access Control Configuration *}
subsection \<open>Access Control Configuration\<close>
definition alice :: user where "alice = 1"
definition bob :: user where "bob = 2"
definition charlie :: user where "charlie = 3"
@@ -85,11 +85,11 @@ definition LR1 :: LR where
definition \<Sigma>0 :: \<Sigma> where
"\<Sigma>0 = (Map.empty(patient1\<mapsto>LR1))"

subsection {* The Initial System State *}
subsection \<open>The Initial System State\<close>
definition \<sigma>0 :: "DB \<times> \<Sigma>\<times>\<upsilon>" where
"\<sigma>0 = (Spine0,\<Sigma>0,UC0)"
subsection{* Basic Properties *}
subsection\<open>Basic Properties\<close>

lemma [simp]: "(case a of allow d \<Rightarrow> \<lfloor>X\<rfloor> | deny d2 \<Rightarrow> \<lfloor>Y\<rfloor>) = \<bottom> \<Longrightarrow> False"
by (case_tac a,simp_all)
@@ -122,13 +122,13 @@ lemma deny_allow[simp]: " \<lfloor>deny ()\<rfloor> \<notin> Some ` range allow"
lemma allow_deny[simp]: " \<lfloor>allow ()\<rfloor> \<notin> Some ` range deny"
by auto
text{* Policy as monad. Alice using her first urp can read the SCR of patient1. *}
text\<open>Policy as monad. Alice using her first urp can read the SCR of patient1.\<close>
lemma
"(\<sigma>0 \<Turnstile> (os \<leftarrow> mbind [(createSCR alice Clerical patient1)] (PolMon);
(return (os = [(deny (Out) )]))))"
by (simp add: PolMon_def MonSimps PolSimps)
text{* Presenting her other urp, she is not allowed to read it. *}
text\<open>Presenting her other urp, she is not allowed to read it.\<close>
lemma "SE_LR_RBAC_Policy ((appendEntry alice Clerical patient1 ei d),\<sigma>0)= \<lfloor>deny ()\<rfloor>"
by (simp add: PolSimps)



+ 3
- 3
UPF/UPF.thy View File

@@ -41,7 +41,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section {* Putting Everything Together: UPF *}
section \<open>Putting Everything Together: UPF\<close>
theory
UPF
imports
@@ -50,10 +50,10 @@ theory
Analysis
begin

text{*
text\<open>
This is the top-level theory for the Unified Policy Framework (UPF) and, thus,
builds the base theory for using UPF. For the moment, we only define a set of
lemmas for all core UPF definitions that is useful for using UPF:
*}
\<close>
lemmas UPFDefs = UPFCoreDefs ParallelDefs ElementaryPoliciesDefs
end

+ 40
- 40
UPF/UPFCore.thy View File

@@ -41,7 +41,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************)

section{* The Core of the Unified Policy Framework (UPF) *}
section\<open>The Core of the Unified Policy Framework (UPF)\<close>
theory
UPFCore
imports
@@ -49,8 +49,8 @@ theory
begin


subsection{* Foundation *}
text{*
subsection\<open>Foundation\<close>
text\<open>
The purpose of this theory is to formalize a somewhat non-standard view
on the fundamental concept of a security policy which is worth outlining.
This view has arisen from prior experience in the modelling of network
@@ -74,37 +74,37 @@ text{*
In more detail, we model policies as partial functions based on input
data $\alpha$ (arguments, system state, security context, ...) to output
data $\beta$:
*}
\<close>

datatype '\<alpha> decision = allow '\<alpha> | deny '\<alpha>

type_synonym ('\<alpha>,'\<beta>) policy = "'\<alpha> \<rightharpoonup> '\<beta> decision" (infixr "|->" 0)

text{*In the following, we introduce a number of shortcuts and alternative notations.
The type of policies is represented as: *}
text\<open>In the following, we introduce a number of shortcuts and alternative notations.
The type of policies is represented as:\<close>

translations (type) "'\<alpha> |-> '\<beta>" <= (type) "'\<alpha> \<rightharpoonup> '\<beta> decision"
type_notation "policy" (infixr "\<mapsto>" 0)

text{* ... allowing the notation @{typ "'\<alpha> \<mapsto> '\<beta>"} for the policy type and the
text\<open>... allowing the notation @{typ "'\<alpha> \<mapsto> '\<beta>"} for the policy type and the
alternative notations for @{term None} and @{term Some} of the \HOL library
@{typ "'\<alpha> option"} type:*}
@{typ "'\<alpha> option"} type:\<close>

notation "None" ("\<bottom>")
notation "Some" ("\<lfloor>_\<rfloor>" 80)

text{* Thus, the range of a policy may consist of @{term "\<lfloor>accept x\<rfloor>"} data,
text\<open>Thus, the range of a policy may consist of @{term "\<lfloor>allow x\<rfloor>"} data,
of @{term "\<lfloor>deny x\<rfloor>"} data, as well as @{term "\<bottom>"} modeling the undefinedness
of a policy, i.e. a policy is considered as a partial function. Partial
functions are used since we describe elementary policies by partial system
behaviour, which are glued together by operators such as function override and
functional composition.
*}
\<close>

text{* We define the two fundamental sets, the allow-set $Allow$ and the
text\<open>We define the two fundamental sets, the allow-set $Allow$ and the
deny-set $Deny$ (written $A$ and $D$ set for short), to characterize these
two main sets of the range of a policy.
*}
\<close>
definition Allow :: "('\<alpha> decision) set"
where "Allow = range allow"

@@ -112,13 +112,13 @@ definition Deny :: "('\<alpha> decision) set"
where "Deny = range deny"

subsection{* Policy Constructors *}
text{*
subsection\<open>Policy Constructors\<close>
text\<open>
Most elementary policy constructors are based on the
update operation @{thm [source] "Fun.fun_upd_def"} @{thm Fun.fun_upd_def}
and the maplet-notation @{term "a(x \<mapsto> y)"} used for @{term "a(x:=\<lfloor>y\<rfloor>)"}.

Furthermore, we add notation adopted to our problem domain: *}
Furthermore, we add notation adopted to our problem domain:\<close>

nonterminal policylets and policylet

@@ -137,14 +137,14 @@ translations
"_MapUpd m (_policylet2 x y)" \<rightleftharpoons> "m(x := CONST Some (CONST deny y))"
"\<emptyset>" \<rightleftharpoons> "CONST Map.empty"

text{* Here are some lemmas essentially showing syntactic equivalences: *}
text\<open>Here are some lemmas essentially showing syntactic equivalences:\<close>
lemma test: "\<emptyset>(x\<mapsto>\<^sub>+a, y\<mapsto>\<^sub>-b) = \<emptyset>(x \<mapsto>\<^sub>+ a, y \<mapsto>\<^sub>- b)" by simp

lemma test2: "p(x\<mapsto>\<^sub>+a,x\<mapsto>\<^sub>-b) = p(x\<mapsto>\<^sub>-b)" by simp

text{*
text\<open>
We inherit a fairly rich theory on policy updates from Map here. Some examples are:
*}
\<close>

lemma pol_upd_triv1: "t k = \<lfloor>allow x\<rfloor> \<Longrightarrow> t(k\<mapsto>\<^sub>+x) = t"
by (rule ext) simp
@@ -168,14 +168,14 @@ lemma pol_upd_neq1 [simp]: "m(a\<mapsto>\<^sub>+x) \<noteq> n(a\<mapsto>\<^sub>-
by(auto dest: map_upd_eqD1)


subsection{* Override Operators *}
text{*
subsection\<open>Override Operators\<close>
text\<open>
Key operators for constructing policies are the override operators. There are four different
versions of them, with one of them being the override operator from the Map theory. As it is
common to compose policy rules in a ``left-to-right-first-fit''-manner, that one is taken as
default, defined by a syntax translation from the provided override operator from the Map
theory (which does it in reverse order).
*}
\<close>

syntax
"_policyoverride" :: "['a \<mapsto> 'b, 'a \<mapsto> 'b] \<Rightarrow> 'a \<mapsto> 'b" (infixl "\<Oplus>" 100)
@@ -183,9 +183,9 @@ translations
"p \<Oplus> q" \<rightleftharpoons> "q ++ p"


text{*
text\<open>
Some elementary facts inherited from Map are:
*}
\<close>

lemma override_empty: "p \<Oplus> \<emptyset> = p"
by simp
@@ -196,10 +196,10 @@ lemma empty_override: "\<emptyset> \<Oplus> p = p"
lemma override_assoc: "p1 \<Oplus> (p2 \<Oplus> p3) = (p1 \<Oplus> p2) \<Oplus> p3"
by simp

text{*
text\<open>
The following two operators are variants of the standard override. For override\_A,
an allow wins over a deny. For override\_D, the situation is dual.
*}
\<close>

definition override_A :: "['\<alpha>\<mapsto>'\<beta>, '\<alpha>\<mapsto>'\<beta>] \<Rightarrow> '\<alpha>\<mapsto>'\<beta>" (infixl "++'_A" 100)
where "m2 ++_A m1 =
@@ -268,15 +268,15 @@ lemma override_D_assoc: "p1 \<Oplus>\<^sub>D (p2 \<Oplus>\<^sub>D p3) = (p1 \<Op
apply (simp add: override_D_def split: decision.splits option.splits)
done

subsection{* Coercion Operators *}
text{*
subsection\<open>Coercion Operators\<close>
text\<open>
Often, especially when combining policies of different type, it is necessary to
adapt the input or output domain of a policy to a more refined context.
*}
\<close>

text{*
text\<open>
An analogous operator for the range of a policy is defined as follows:
*}
\<close>

definition policy_range_comp :: "['\<beta>\<Rightarrow>'\<gamma>, '\<alpha>\<mapsto>'\<beta>] \<Rightarrow> '\<alpha> \<mapsto>'\<gamma>" (infixl "o'_f" 55)
where
@@ -296,10 +296,10 @@ lemma policy_range_comp_strict : "f o\<^sub>f \<emptyset> = \<emptyset>"
done


text{*
text\<open>
A generalized version is, where separate coercion functions are applied to the result
depending on the decision of the policy is as follows:
*}
\<close>

definition range_split :: "[('\<beta>\<Rightarrow>'\<gamma>)\<times>('\<beta>\<Rightarrow>'\<gamma>),'\<alpha> \<mapsto> '\<beta>] \<Rightarrow> '\<alpha> \<mapsto> '\<gamma>"
(infixr "\<nabla>" 100)
@@ -331,9 +331,9 @@ lemma range_split_charn:
done
done

text{*
text\<open>
The connection between these two becomes apparent if considering the following lemma:
*}
\<close>

lemma range_split_vs_range_compose: "(f,f) \<nabla> p = f o\<^sub>f p"
by(simp add: range_split_charn policy_range_comp_def)
@@ -364,14 +364,14 @@ lemma range_split_bi_compose [simp]: "(f1,f2) \<nabla> (g1,g2) \<nabla> p = (f1
done
done

text{*
text\<open>
The next three operators are rather exotic and in most cases not used.
*}
\<close>

text {*
text \<open>
The following is a variant of range\_split, where the change in the decision depends
on the input instead of the output.
*}
\<close>

definition dom_split2a :: "[('\<alpha> \<rightharpoonup> '\<gamma>) \<times> ('\<alpha> \<rightharpoonup>'\<gamma>),'\<alpha> \<mapsto> '\<beta>] \<Rightarrow> '\<alpha> \<mapsto> '\<gamma>" (infixr "\<Delta>a" 100)
where "P \<Delta>a p = (\<lambda>x. case p x of
@@ -391,11 +391,11 @@ where "P \<nabla>2 p = (\<lambda>x. case p x of
| \<lfloor>deny y\<rfloor> \<Rightarrow> \<lfloor>deny (y,(snd P) x)\<rfloor>
| \<bottom> \<Rightarrow> \<bottom>)"

text{*
text\<open>
The following operator is used for transition policies only: a transition policy is transformed
into a state-exception monad. Such a monad can for example be used for test case generation using
HOL-Testgen~\cite{brucker.ea:theorem-prover:2012}.
*}
\<close>

definition policy2MON :: "('\<iota>\<times>'\<sigma> \<mapsto> 'o\<times>'\<sigma>) \<Rightarrow> ('\<iota> \<Rightarrow>('o decision,'\<sigma>) MON\<^sub>S\<^sub>E)"
where "policy2MON p = (\<lambda> \<iota> \<sigma>. case p (\<iota>,\<sigma>) of


+ 2
- 2
UPF/document/root.bib View File

@@ -8,7 +8,7 @@
# {\providecommand{\isbn}{\textsc{isbn}} }
# {\providecommand{\Cpp}{C++} }
# {\providecommand{\Specsharp}{Spec\#} }
# {\providecommand{\doi}[1]{\href{http://dx.doi.org/#1}{doi:
# {\providecommand{\doi}[1]{\href{https://doi.org/#1}{doi:
{\urlstyle{rm}\nolinkurl{#1}}}}} }
@STRING{conf-sacmat="ACM symposium on access control models and technologies
(SACMAT)" }
@@ -319,7 +319,7 @@
revocation are provided, and proofs are given for the
important properties of our delegation framework.},
issn = {0306-4379},
doi = {http://dx.doi.org/10.1016/j.is.2005.11.008},
doi = {https://doi.org/10.1016/j.is.2005.11.008},
publisher = pub-elsevier,
address = {Oxford, UK, UK},
tags = {ReadingList, SoKNOS},


Loading…
Cancel
Save