diff --git a/Analysis.thy b/Analysis.thy index 043ddc2..1c81919 100644 --- a/Analysis.thy +++ b/Analysis.thy @@ -42,7 +42,7 @@ (* $Id: Analysis.thy 10953 2014-11-24 11:23:40Z wolff $ *) -header{* Properties on Policies *} +section{* Properties on Policies *} theory Analysis imports @@ -54,14 +54,14 @@ text {* In this theory, several standard policy properties are paraphrased in UPF terms. *} -section{* Basic Properties *} +subsection{* Basic Properties *} -subsection{* A Policy Has no Gaps *} +subsubsection{* A Policy Has no Gaps *} definition gap_free :: "('a \ 'b) \ bool" where "gap_free p = (dom p = UNIV)" -subsection{* Comparing Policies *} +subsubsection{* Comparing Policies *} text {* Policy p is more defined than q: *} definition more_defined :: "('a \ 'b) \('a \ 'b) \bool" where "more_defined p q = (dom q \ dom p)" @@ -128,7 +128,7 @@ lemma "A\<^sub>I \\<^sub>A p" unfolding more_permissive_def allow_all_fun_def allow_pfun_def allow_all_id_def by(auto split: option.split decision.split) -section{* Combined Data-Policy Refinement *} +subsection{* Combined Data-Policy Refinement *} definition policy_refinement :: "('a \ 'b) \ ('a' \ 'a) \('b' \ 'b) \ ('a' \ 'b') \ bool" @@ -163,7 +163,7 @@ apply(erule_tac x=" (f' a')" in allE, auto) done -section {* Equivalence of Policies *} +subsection {* Equivalence of Policies *} subsubsection{* Equivalence over domain D *} definition p_eq_dom :: "('a \ 'b) \ 'a set \ ('a \ 'b) \bool" ("_ \\<^bsub>_\<^esub> _" [60,60,60]60) @@ -190,7 +190,7 @@ shows "no_conflicts p q" apply (metis)+ done -subsection{* Miscellaneous *} +subsubsection{* Miscellaneous *} lemma dom_inter: "\dom p \ dom q = {}; p x = \y\\ \ q x = \" by (auto) diff --git a/ElementaryPolicies.thy b/ElementaryPolicies.thy index c64ffe4..d4c4eae 100644 --- a/ElementaryPolicies.thy +++ b/ElementaryPolicies.thy @@ -41,7 +41,7 @@ ******************************************************************************) (* $Id: ElementaryPolicies.thy 10945 2014-11-21 12:50:43Z wolff $ *) -header{* Elementary Policies *} +section{* Elementary Policies *} theory ElementaryPolicies imports @@ -54,7 +54,7 @@ text{* policies defined in this theory. 
*} -section{* The Core Policy Combinators: Allow and Deny Everything *} +subsection{* The Core Policy Combinators: Allow and Deny Everything *} definition deny_pfun :: "('\ \'\) \ ('\ \ '\)" ("AllD") @@ -111,7 +111,7 @@ lemma neq_Allow_Deny: "pf \ \ \ (deny_pfun pf) apply (auto) done -section{* Common Instances *} +subsection{* Common Instances *} definition allow_all_fun :: "('\ \ '\) \ ('\ \ '\)" ("A\<^sub>f") where "allow_all_fun f = allow_pfun (Some o f)" @@ -161,7 +161,7 @@ lemma deny_left_cancel :"dom pf = UNIV \ (deny_pfun pf) \ apply (auto simp: deny_pfun_def option.splits) done -section{* Domain, Range, and Restrictions *} +subsection{* Domain, Range, and Restrictions *} text{* Since policies are essentially maps, we inherit the basic definitions for @@ -196,6 +196,7 @@ lemma sub_ran : "ran p \ Allow \ Deny" apply (auto simp: Allow_def Deny_def ran_def full_SetCompr_eq[symmetric]) apply (case_tac "x") apply (simp_all) + apply (rename_tac \) apply (erule_tac x="\" in allE) apply (simp) done diff --git a/Monads.thy b/Monads.thy index c0affba..e5ea02b 100644 --- a/Monads.thy +++ b/Monads.thy @@ -41,14 +41,14 @@ ******************************************************************************) (* $Id: Monads.thy 10922 2014-11-10 15:41:49Z wolff $ *) -header {* Basic Monad Theory for Sequential Computations *} +section {* Basic Monad Theory for Sequential Computations *} theory Monads imports Main begin -section{* General Framework for Monad-based Sequence-Test *} +subsection{* General Framework for Monad-based Sequence-Test *} text{* As such, Higher-order Logic as a purely functional specification formalism has no built-in mechanism for state and state-transitions. Forms of testing involving state require therefore @@ -69,7 +69,7 @@ text{* \end{enumerate} *} -subsection{* State Exception Monads *} +subsubsection{* State Exception Monads *} type_synonym ('o, '\) MON\<^sub>S\<^sub>E = "'\ \ ('o \ '\)" definition bind_SE :: "('o,'\)MON\<^sub>S\<^sub>E \ ('o \ ('o','\)MON\<^sub>S\<^sub>E) \ ('o','\)MON\<^sub>S\<^sub>E" @@ -262,7 +262,7 @@ by(simp add: malt_SE_def) lemma malt_SE_cons [simp]: "\\<^sub>S\<^sub>E (a # S) = (a \\<^sub>S\<^sub>E (\\<^sub>S\<^sub>E S))" by(simp add: malt_SE_def) -subsection{* State-Backtrack Monads *} +subsubsection{* State-Backtrack Monads *} text{*This subsection is still rudimentary and as such an interesting formal analogue to the previous monad definitions. It is doubtful that it is interesting for testing and as a computational structure at all. @@ -301,7 +301,7 @@ lemma bind_assoc_SB: "(y := (x := m; k); h) = (x := m; (y := k; h))" apply (simp add: unit_SB_def bind_SB_def split_def) done -subsection{* State Backtrack Exception Monad *} +subsubsection{* State Backtrack Exception Monad *} text{* The following combination of the previous two Monad-Constructions allows for the semantic foundation of a simple generic assertion language in the style of Schirmer's Simpl-Language or @@ -370,7 +370,7 @@ next by(rule_tac x="(a, b)" in bexI, simp_all) next case goal4 then show ?case - apply (erule_tac Q="None = ?X" in contrapos_pp) + apply (erule_tac Q="None = X" for X in contrapos_pp) apply (erule_tac x="(aa,b)" and P="\ x. None \ split (\out. 
k) x" in ballE) apply (auto simp: aux (*Option.not_None_eq*) image_def split_def intro!: rev_bexI) done @@ -390,7 +390,7 @@ next qed -section{* Valid Test Sequences in the State Exception Monad *} +subsection{* Valid Test Sequences in the State Exception Monad *} text{* This is still an unstructured merge of executable monad concepts and specification oriented high-level properties initiating test procedures. @@ -553,7 +553,7 @@ lemma [code]: "(\ \ m) = (case (m \) of None \ \\<^sub>2\<^sub>I p = \" apply (simp add: prod_2_id_def prod_2_def) done -section{* Combinators for Transition Policies *} +subsection{* Combinators for Transition Policies *} text {* For constructing transition policies, two additional combinators are required: one combines state transitions by pairing the states, the other works equivalently on general maps. @@ -231,7 +231,7 @@ where "p1 \\<^sub>S p2 = (p1 \\<^sub>M p2) o (\ (a,b,c). ((a,b),a,c))" -section{* Range Splitting *} +subsection{* Range Splitting *} text{* The following combinator is a special case of both a parallel composition operator and a range splitting operator. Its primary use case is when combining a policy with state transitions. @@ -257,7 +257,7 @@ lemma comp_ran_split_charn: apply (auto) done -section {* Distributivity of the parallel combinators *} +subsection {* Distributivity of the parallel combinators *} lemma distr_or1_a: "(F = F1 \ F2) \ (((N \\<^sub>1 F) o f) = (((N \\<^sub>1 F1) o f) \ ((N \\<^sub>1 F2) o f))) " diff --git a/SeqComposition.thy b/SeqComposition.thy index 0410984..9ee5dea 100644 --- a/SeqComposition.thy +++ b/SeqComposition.thy @@ -40,7 +40,7 @@ ******************************************************************************) (* $Id: SeqComposition.thy 10879 2014-10-26 11:35:31Z brucker $ *) -header{* Sequential Composition *} +section{* Sequential Composition *} theory SeqComposition imports @@ -52,7 +52,7 @@ text{* the second policy to the output of the first one. Again, there are four possibilities how the decisions can be combined. *} -section {* Flattening *} +subsection {* Flattening *} text{* A key concept of sequential policy composition is the flattening of nested decisions. There are four possibilities, and these possibilities will give the various flavours of policy composition. 
@@ -65,22 +65,20 @@ where "flat_orA(allow(allow y)) = allow y" lemma flat_orA_deny[dest]:"flat_orA x = deny y \ x = deny(deny y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done lemma flat_orA_allow[dest]: "flat_orA x = allow y \ x = allow(allow y) \ x = allow(deny y) \ x = deny(allow y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done fun flat_orD :: "('\ decision) decision \ ('\ decision)" @@ -91,22 +89,20 @@ where "flat_orD(allow(allow y)) = allow y" lemma flat_orD_allow[dest]: "flat_orD x = allow y \ x = allow(allow y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done lemma flat_orD_deny[dest]: "flat_orD x = deny y \ x = deny(deny y) \ x = allow(deny y) \ x = deny(allow y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done fun flat_1 :: "('\ decision) decision \ ('\ decision)" @@ -117,20 +113,18 @@ where "flat_1(allow(allow y)) = allow y" lemma flat_1_allow[dest]: "flat_1 x = allow y \ x = allow(allow y) \ x = allow(deny y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done lemma flat_1_deny[dest]: "flat_1 x = deny y \ x = deny(deny y) \ x = deny(allow y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done fun flat_2 :: "('\ decision) decision \ ('\ decision)" @@ -141,23 +135,21 @@ where "flat_2(allow(allow y)) = allow y" lemma flat_2_allow[dest]: "flat_2 x = allow y \ x = allow(allow y) \ x = deny(allow y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done lemma flat_2_deny[dest]: "flat_2 x = deny y \ x = deny(deny y) \ x = allow(deny y)" apply (case_tac "x") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) - apply (case_tac "\") - apply (simp_all) + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] + apply (rename_tac \) + apply (case_tac "\", simp_all)[1] done -section{* Policy Composition *} +subsection{* Policy Composition *} text{* The following definition allows to compose two policies. Denies and allows are transferred. 
*} diff --git a/Service.thy b/Service.thy index 34ff7ab..6672a32 100644 --- a/Service.thy +++ b/Service.thy @@ -40,7 +40,7 @@ ******************************************************************************) (* $Id: Service.thy 10945 2014-11-21 12:50:43Z wolff $ *) -header {* Secure Service Specification *} +section {* Secure Service Specification *} theory Service imports @@ -51,8 +51,8 @@ text {* that allows the staff in a hospital to access health care records of patients. *} -section{* Datatypes for Modelling Users and Roles*} -subsection {* Users *} +subsection{* Datatypes for Modelling Users and Roles*} +subsubsection {* Users *} text{* First, we introduce a type for users that we use to model that each staff member has a unique id: @@ -64,7 +64,7 @@ text {* *} type_synonym patient = int (* Each patient gets a unique id *) -subsection {* Roles and Relationships*} +subsubsection {* Roles and Relationships*} text{* In our example, we assume three different roles for members of the clinical staff: *} datatype role = ClinicalPractitioner | Nurse | Clerical @@ -83,8 +83,8 @@ type_synonym \ = "patient \ LR" text{* The user context stores the roles the users are in. *} type_synonym \ = "user \ role" -section {* Modelling Health Records and the Web Service API*} -subsection {* Health Records *} +subsection {* Modelling Health Records and the Web Service API*} +subsubsection {* Health Records *} text {* The content and the status of the entries of a health record *} datatype data = dummyContent datatype status = Open | Closed @@ -93,7 +93,7 @@ type_synonym entry = "status \ user \ data" type_synonym SCR = "(entry_id \ entry)" type_synonym DB = "patient \ SCR" -subsection {* The Web Service API *} +subsubsection {* The Web Service API *} text{* The operations provided by the service: *} datatype Operation = createSCR user role patient | appendEntry user role patient entry_id entry @@ -207,7 +207,7 @@ fun allContentStatic where |"allContentStatic [] = True" -section{* Modelling Access Control*} +subsection{* Modelling Access Control*} text {* In the following, we define a rather complex access control model for our scenario that extends traditional role-based access control @@ -217,7 +217,7 @@ text {* for a general motivation and explanation of break-the-glass access control). 
*} -subsection {* Sealed Envelopes *} +subsubsection {* Sealed Envelopes *} type_synonym SEPolicy = "(Operation \ DB \ unit) " @@ -259,7 +259,7 @@ definition SEPolicy :: SEPolicy where lemmas SEsimps = SEPolicy_def get_entry_def userHasAccess_def editEntrySE_def deleteEntrySE_def readEntrySE_def -subsection {* Legitimate Relationships *} +subsubsection {* Legitimate Relationships *} type_synonym LRPolicy = "(Operation \ \, unit) policy" @@ -365,7 +365,7 @@ definition FunPolicy where removeLRFunPolicy \ readSCRFunPolicy \ addLRFunPolicy \ createFunPolicy \ A\<^sub>U" -subsection{* Modelling Core RBAC *} +subsubsection{* Modelling Core RBAC *} type_synonym RBACPolicy = "Operation \ \ \ unit" @@ -389,9 +389,9 @@ definition RBACPolicy :: RBACPolicy where then \allow ()\ else \deny ()\)" -section {* The State Transitions and Output Function*} +subsection {* The State Transitions and Output Function*} -subsection{* State Transition *} +subsubsection{* State Transition *} fun OpSuccessDB :: "(Operation \ DB) \ DB" where "OpSuccessDB (createSCR u r p,S) = (case S p of \ \ \S(p\\)\ @@ -434,7 +434,7 @@ fun OpSuccessSigma :: "(Operation \ \) \ \" fun OpSuccessUC :: "(Operation \ \) \ \" where "OpSuccessUC (f,u) = \u\" -subsection {* Output *} +subsubsection {* Output *} type_synonym Output = unit @@ -445,7 +445,7 @@ fun OpSuccessOutput :: "(Operation) \ Output" where fun OpFailOutput :: "Operation \ Output" where "OpFailOutput x = \()\" -section {* Combine All Parts *} +subsection {* Combine All Parts *} definition SE_LR_Policy :: "(Operation \ DB \ \, unit) policy" where "SE_LR_Policy = (\(x,x). x) o\<^sub>f (SEPolicy \\<^sub>\\<^sub>D LR_Policy) o (\(a,b,c). ((a,b),a,c))" diff --git a/ServiceExample.thy b/ServiceExample.thy index 5260836..64edfba 100644 --- a/ServiceExample.thy +++ b/ServiceExample.thy @@ -40,7 +40,7 @@ ******************************************************************************) (* $Id: ServiceExample.thy 10954 2014-11-24 12:43:29Z wolff $ *) -header {* Instantiating Our Secure Service Example *} +section {* Instantiating Our Secure Service Example *} theory ServiceExample imports @@ -52,7 +52,7 @@ text {* two patients: *} -section {* Access Control Configuration *} +subsection {* Access Control Configuration *} definition alice :: user where "alice = 1" definition bob :: user where "bob = 2" definition charlie :: user where "charlie = 3" @@ -86,11 +86,11 @@ definition LR1 :: LR where definition \0 :: \ where "\0 = (empty(patient1\LR1))" -section {* The Initial System State *} +subsection {* The Initial System State *} definition \0 :: "DB \ \\\" where "\0 = (Spine0,\0,UC0)" -section{* Basic Properties *} +subsection{* Basic Properties *} lemma [simp]: "(case a of allow d \ \X\ | deny d2 \ \Y\) = \ \ False" by (case_tac a,simp_all) diff --git a/UPF.thy b/UPF.thy index 01136d3..7e0f232 100644 --- a/UPF.thy +++ b/UPF.thy @@ -41,7 +41,7 @@ ******************************************************************************) (* $Id: UPF.thy 10879 2014-10-26 11:35:31Z brucker $ *) -header {* Putting Everything Together: UPF *} +section {* Putting Everything Together: UPF *} theory UPF imports diff --git a/UPFCore.thy b/UPFCore.thy index 5df141b..499e05d 100644 --- a/UPFCore.thy +++ b/UPFCore.thy @@ -1,400 +1,400 @@ -(***************************************************************************** - * HOL-TestGen --- theorem-prover based test case generation - * http://www.brucker.ch/projects/hol-testgen/ - * - * UPF - * This file is part of HOL-TestGen. 
- * - * Copyright (c) 2005-2012 ETH Zurich, Switzerland - * 2008-2014 Achim D. Brucker, Germany - * 2009-2014 Université Paris-Sud, France - * - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * * Neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ******************************************************************************) -(* $Id: UPFCore.thy 10951 2014-11-21 21:54:46Z wolff $ *) - -header{* The Core of the Unified Policy Framework (UPF) *} -theory - UPFCore -imports - Monads -begin - - -section{* Foundation *} -text{* - The purpose of this theory is to formalize a somewhat non-standard view - on the fundamental concept of a security policy which is worth outlining. - This view has arisen from prior experience in the modelling of network - (firewall) policies. Instead of regarding policies as relations on resources, - sets of permissions, etc., we emphasise the view that a policy is a policy - decision function that grants or denies access to resources, permissions, etc. - In other words, we model the concrete function that implements the policy - decision point in a system, and which represents a "wrapper" around the - business logic. An advantage of this view is that it is compatible - with many different policy models, enabling a uniform modelling framework to be - defined. Furthermore, this function is typically a large cascade of nested - conditionals, using conditions referring to an internal state and security - contexts of the system or a user. This cascade of conditionals can easily be - decomposed into a set of test cases similar to transformations used for binary - decision diagrams (BDD), and motivate equivalence class testing for unit test and - sequence test scenarios. From the modelling perspective, using \HOL as - its input language, we will consequently use the expressive power of its - underlying functional programming language, including the possibility to - define higher-order combinators. - - In more detail, we model policies as partial functions based on input - data $\alpha$ (arguments, system state, security context, ...) 
to output - data $\beta$: -*} - -datatype '\ decision = allow '\ | deny '\ - -type_synonym ('\,'\) policy = "'\ \ '\ decision" (infixr "|->" 0) - -text{*In the following, we introduce a number of shortcuts and alternative notations. -The type of policies is represented as: *} - -translations (type) "'\ |-> '\" <= (type) "'\ \ '\ decision" -type_notation (xsymbols) "policy" (infixr "\" 0) - -text{* ... allowing the notation @{typ "'\ \ '\"} for the policy type and the -alternative notations for @{term None} and @{term Some} of the \HOL library -@{typ "'\ option"} type:*} - -notation "None" ("\") -notation "Some" ("\_\" 80) - -text{* Thus, the range of a policy may consist of @{term "\accept x\"} data, - of @{term "\deny x\"} data, as well as @{term "\"} modeling the undefinedness - of a policy, i.e. a policy is considered as a partial function. Partial - functions are used since we describe elementary policies by partial system - behaviour, which are glued together by operators such as function override and - functional composition. -*} - -text{* We define the two fundamental sets, the allow-set $Allow$ and the - deny-set $Deny$ (written $A$ and $D$ set for short), to characterize these - two main sets of the range of a policy. -*} -definition Allow :: "('\ decision) set" -where "Allow = range allow" - -definition Deny :: "('\ decision) set" -where "Deny = range deny" - - -section{* Policy Constructors *} -text{* - Most elementary policy constructors are based on the - update operation @{thm [source] "Fun.fun_upd_def"} @{thm Fun.fun_upd_def} - and the maplet-notation @{term "a(x \ y)"} used for @{term "a(x:=\y\)"}. - - Furthermore, we add notation adopted to our problem domain: *} - -nonterminal policylets and policylet - -syntax - "_policylet1" :: "['a, 'a] => policylet" ("_ /+=/ _") - "_policylet2" :: "['a, 'a] => policylet" ("_ /-=/ _") - "" :: "policylet => policylets" ("_") - "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") - "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") - "_MapUpd" :: "['a |-> 'b, policylets] => 'a |-> 'b" ("_/'(_')" [900,0]900) - -syntax (xsymbols) - "_policylet1" :: "['a, 'a] => policylet" ("_ /\\<^sub>+/ _") - "_policylet2" :: "['a, 'a] => policylet" ("_ /\\<^sub>-/ _") - "_emptypolicy" :: "'a |-> 'b" ("\") - -translations - "_MapUpd m (_Maplets xy ms)" \ "_MapUpd (_MapUpd m xy) ms" - "_MapUpd m (_policylet1 x y)" \ "m(x := CONST Some (CONST allow y))" - "_MapUpd m (_policylet2 x y)" \ "m(x := CONST Some (CONST deny y))" - "\" \ "CONST empty" - -text{* Here are some lemmas essentially showing syntactic equivalences: *} -lemma test: "empty(x+=a, y-= b) = \(x \\<^sub>+ a, y \\<^sub>- b)" by simp - -lemma test2: "p(x\\<^sub>+a,x\\<^sub>-b) = p(x\\<^sub>-b)" by simp - -text{* - We inherit a fairly rich theory on policy updates from Map here. 
Some examples are: -*} - -lemma pol_upd_triv1: "t k = \allow x\ \ t(k\\<^sub>+x) = t" - by (rule ext) simp - -lemma pol_upd_triv2: "t k = \deny x\ \ t(k\\<^sub>-x) = t" - by (rule ext) simp - -lemma pol_upd_allow_nonempty: "t(k\\<^sub>+x) \ \" - by simp - -lemma pol_upd_deny_nonempty: "t(k\\<^sub>-x) \ \" - by simp - -lemma pol_upd_eqD1 : "m(a\\<^sub>+x) = n(a\\<^sub>+y) \ x = y" - by(auto dest: map_upd_eqD1) - -lemma pol_upd_eqD2 : "m(a\\<^sub>-x) = n(a\\<^sub>-y) \ x = y" - by(auto dest: map_upd_eqD1) - -lemma pol_upd_neq1 [simp]: "m(a\\<^sub>+x) \ n(a\\<^sub>-y)" - by(auto dest: map_upd_eqD1) - - -section{* Override Operators *} -text{* - Key operators for constructing policies are the override operators. There are four different - versions of them, with one of them being the override operator from the Map theory. As it is - common to compose policy rules in a ``left-to-right-first-fit''-manner, that one is taken as - default, defined by a syntax translation from the provided override operator from the Map - theory (which does it in reverse order). -*} - -syntax - "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "(+p/)" 100) - -syntax (xsymbols) - "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\" 100) - -translations - "p \ q" \ "q ++ p" - - -text{* - Some elementary facts inherited from Map are: -*} - -lemma override_empty: "p \ \ = p" - by simp - -lemma empty_override: "\ \ p = p" - by simp - -lemma override_assoc: "p1 \ (p2 \ p3) = (p1 \ p2) \ p3" - by simp - -text{* - The following two operators are variants of the standard override. For override\_A, - an allow of wins over a deny. For override\_D, the situation is dual. -*} - -definition override_A :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_A" 100) -where "m2 ++_A m1 = - (\x. (case m1 x of - \allow a\ \ \allow a\ - | \deny a\ \ (case m2 x of \allow b\ \ \allow b\ - | _ \ \deny a\) - | \ \ m2 x) - )" - -syntax (xsymbols) - "_policyoverride_A" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>A" 100) - -translations - "p \\<^sub>A q" \ "p ++_A q" - -lemma override_A_empty[simp]: "p \\<^sub>A \ = p" - by(simp add:override_A_def) - -lemma empty_override_A[simp]: "\ \\<^sub>A p = p" - apply (rule ext) - apply (simp add:override_A_def) - apply (case_tac "p x") - apply (simp_all) - apply (case_tac a) - apply (simp_all) -done - - -lemma override_A_assoc: "p1 \\<^sub>A (p2 \\<^sub>A p3) = (p1 \\<^sub>A p2) \\<^sub>A p3" - by (rule ext, simp add: override_A_def split: decision.splits option.splits) - -definition override_D :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_D" 100) -where "m1 ++_D m2 = - (\x. case m2 x of - \deny a\ \ \deny a\ - | \allow a\ \ (case m1 x of \deny b\ \ \deny b\ - | _ \ \allow a\) - | \ \ m1 x - )" - -syntax (xsymbols) - "_policyoverride_D" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>D" 100) -translations - "p \\<^sub>D q" \ "p ++_D q" - -lemma override_D_empty[simp]: "p \\<^sub>D \ = p" - by(simp add:override_D_def) - -lemma empty_override_D[simp]: "\ \\<^sub>D p = p" - apply (rule ext) - apply (simp add:override_D_def) - apply (case_tac "p x", simp_all) - apply (case_tac a, simp_all) -done - -lemma override_D_assoc: "p1 \\<^sub>D (p2 \\<^sub>D p3) = (p1 \\<^sub>D p2) \\<^sub>D p3" - apply (rule ext) - apply (simp add: override_D_def split: decision.splits option.splits) -done - -section{* Coercion Operators *} -text{* - Often, especially when combining policies of different type, it is necessary to - adapt the input or output domain of a policy to a more refined context. 
-*} - -text{* - An analogous for the range of a policy is defined as follows: -*} - -definition policy_range_comp :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o'_f" 55) -where - "f o_f p = (\x. case p x of - \allow y\ \ \allow (f y)\ - | \deny y\ \ \deny (f y)\ - | \ \ \)" - -syntax (xsymbols) - "_policy_range_comp" :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o\<^sub>f" 55) - -translations - "p o\<^sub>f q" \ "p o_f q" - -lemma policy_range_comp_strict : "f o\<^sub>f \ = \" - apply (rule ext) - apply (simp add: policy_range_comp_def) -done - - -text{* - A generalized version is, where separate coercion functions are applied to the result - depending on the decision of the policy is as follows: -*} - -definition range_split :: "[('\\'\)\('\\'\),'\ \ '\] \ '\ \ '\" - (infixr "\" 100) -where "(P) \ p = (\x. case p x of - \allow y\ \ \allow ((fst P) y)\ - | \deny y\ \ \deny ((snd P) y)\ - | \ \ \)" - -lemma range_split_strict[simp]: "P \ \ = \" - apply (rule ext) - apply (simp add: range_split_def) -done - - -lemma range_split_charn: - "(f,g) \ p = (\x. case p x of - \allow x\ \ \allow (f x)\ - | \deny x\ \ \deny (g x)\ - | \ \ \)" - apply (simp add: range_split_def) - apply (rule ext) - apply (case_tac "p x") - apply (simp_all) - apply (case_tac "a") - apply (simp_all) -done - -text{* - The connection between these two becomes apparent if considering the following lemma: -*} - -lemma range_split_vs_range_compose: "(f,f) \ p = f o\<^sub>f p" - by(simp add: range_split_charn policy_range_comp_def) - -lemma range_split_id [simp]: "(id,id) \ p = p" - apply (rule ext) - apply (simp add: range_split_charn id_def) - apply (case_tac "p x") - apply (simp_all) - apply (case_tac "a") - apply (simp_all) -done - -lemma range_split_bi_compose [simp]: "(f1,f2) \ (g1,g2) \ p = (f1 o g1,f2 o g2) \ p" - apply (rule ext) - apply (simp add: range_split_charn comp_def) - apply (case_tac "p x") - apply (simp_all) - apply (case_tac "a") - apply (simp_all) -done - - -text{* - The next three operators are rather exotic and in most cases not used. -*} - -text {* - The following is a variant of range\_split, where the change in the decision depends - on the input instead of the output. -*} - -definition dom_split2a :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\a" 100) -where "P \a p = (\x. case p x of - \allow y\ \ \allow (the ((fst P) x))\ - | \deny y\ \ \deny (the ((snd P) x))\ - | \ \ \)" - -definition dom_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\" 100) -where "P \ p = (\x. case p x of - \allow y\ \ \allow ((fst P) x)\ - | \deny y\ \ \deny ((snd P) x)\ - | \ \ \)" - -definition range_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ ('\ \'\)" (infixr "\2" 100) -where "P \2 p = (\x. case p x of - \allow y\ \ \allow (y,(fst P) x)\ - | \deny y\ \ \deny (y,(snd P) x)\ - | \ \ \)" - -text{* - The following operator is used for transition policies only: a transition policy is transformed - into a state-exception monad. Such a monad can for example be used for test case generation using - HOL-Testgen~\cite{brucker.ea:theorem-prover:2012}. -*} - -definition policy2MON :: "('\\'\ \ 'o\'\) \ ('\ \('o decision,'\) MON\<^sub>S\<^sub>E)" -where "policy2MON p = (\ \ \. 
case p (\,\) of - \(allow (outs,\'))\ \ \(allow outs, \')\ - | \(deny (outs,\'))\ \ \(deny outs, \')\ - | \ \ \)" - -lemmas UPFCoreDefs = Allow_def Deny_def override_A_def override_D_def policy_range_comp_def - range_split_def dom_split2_def map_add_def restrict_map_def -end - +(***************************************************************************** + * HOL-TestGen --- theorem-prover based test case generation + * http://www.brucker.ch/projects/hol-testgen/ + * + * UPF + * This file is part of HOL-TestGen. + * + * Copyright (c) 2005-2012 ETH Zurich, Switzerland + * 2008-2014 Achim D. Brucker, Germany + * 2009-2014 Université Paris-Sud, France + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * + * * Neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + ******************************************************************************) +(* $Id: UPFCore.thy 10951 2014-11-21 21:54:46Z wolff $ *) + +section{* The Core of the Unified Policy Framework (UPF) *} +theory + UPFCore +imports + Monads +begin + + +subsection{* Foundation *} +text{* + The purpose of this theory is to formalize a somewhat non-standard view + on the fundamental concept of a security policy which is worth outlining. + This view has arisen from prior experience in the modelling of network + (firewall) policies. Instead of regarding policies as relations on resources, + sets of permissions, etc., we emphasise the view that a policy is a policy + decision function that grants or denies access to resources, permissions, etc. + In other words, we model the concrete function that implements the policy + decision point in a system, and which represents a "wrapper" around the + business logic. An advantage of this view is that it is compatible + with many different policy models, enabling a uniform modelling framework to be + defined. Furthermore, this function is typically a large cascade of nested + conditionals, using conditions referring to an internal state and security + contexts of the system or a user. 
This cascade of conditionals can easily be + decomposed into a set of test cases similar to transformations used for binary + decision diagrams (BDD), and motivate equivalence class testing for unit test and + sequence test scenarios. From the modelling perspective, using \HOL as + its input language, we will consequently use the expressive power of its + underlying functional programming language, including the possibility to + define higher-order combinators. + + In more detail, we model policies as partial functions based on input + data $\alpha$ (arguments, system state, security context, ...) to output + data $\beta$: +*} + +datatype '\ decision = allow '\ | deny '\ + +type_synonym ('\,'\) policy = "'\ \ '\ decision" (infixr "|->" 0) + +text{*In the following, we introduce a number of shortcuts and alternative notations. +The type of policies is represented as: *} + +translations (type) "'\ |-> '\" <= (type) "'\ \ '\ decision" +type_notation (xsymbols) "policy" (infixr "\" 0) + +text{* ... allowing the notation @{typ "'\ \ '\"} for the policy type and the +alternative notations for @{term None} and @{term Some} of the \HOL library +@{typ "'\ option"} type:*} + +notation "None" ("\") +notation "Some" ("\_\" 80) + +text{* Thus, the range of a policy may consist of @{term "\accept x\"} data, + of @{term "\deny x\"} data, as well as @{term "\"} modeling the undefinedness + of a policy, i.e. a policy is considered as a partial function. Partial + functions are used since we describe elementary policies by partial system + behaviour, which are glued together by operators such as function override and + functional composition. +*} + +text{* We define the two fundamental sets, the allow-set $Allow$ and the + deny-set $Deny$ (written $A$ and $D$ set for short), to characterize these + two main sets of the range of a policy. +*} +definition Allow :: "('\ decision) set" +where "Allow = range allow" + +definition Deny :: "('\ decision) set" +where "Deny = range deny" + + +subsection{* Policy Constructors *} +text{* + Most elementary policy constructors are based on the + update operation @{thm [source] "Fun.fun_upd_def"} @{thm Fun.fun_upd_def} + and the maplet-notation @{term "a(x \ y)"} used for @{term "a(x:=\y\)"}. + + Furthermore, we add notation adopted to our problem domain: *} + +nonterminal policylets and policylet + +syntax + "_policylet1" :: "['a, 'a] => policylet" ("_ /+=/ _") + "_policylet2" :: "['a, 'a] => policylet" ("_ /-=/ _") + "" :: "policylet => policylets" ("_") + "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") + "_Maplets" :: "[policylet, policylets] => policylets" ("_,/ _") + "_MapUpd" :: "['a |-> 'b, policylets] => 'a |-> 'b" ("_/'(_')" [900,0]900) + +syntax (xsymbols) + "_policylet1" :: "['a, 'a] => policylet" ("_ /\\<^sub>+/ _") + "_policylet2" :: "['a, 'a] => policylet" ("_ /\\<^sub>-/ _") + "_emptypolicy" :: "'a |-> 'b" ("\") + +translations + "_MapUpd m (_Maplets xy ms)" \ "_MapUpd (_MapUpd m xy) ms" + "_MapUpd m (_policylet1 x y)" \ "m(x := CONST Some (CONST allow y))" + "_MapUpd m (_policylet2 x y)" \ "m(x := CONST Some (CONST deny y))" + "\" \ "CONST empty" + +text{* Here are some lemmas essentially showing syntactic equivalences: *} +lemma test: "empty(x+=a, y-= b) = \(x \\<^sub>+ a, y \\<^sub>- b)" by simp + +lemma test2: "p(x\\<^sub>+a,x\\<^sub>-b) = p(x\\<^sub>-b)" by simp + +text{* + We inherit a fairly rich theory on policy updates from Map here. 
Some examples are: +*} + +lemma pol_upd_triv1: "t k = \allow x\ \ t(k\\<^sub>+x) = t" + by (rule ext) simp + +lemma pol_upd_triv2: "t k = \deny x\ \ t(k\\<^sub>-x) = t" + by (rule ext) simp + +lemma pol_upd_allow_nonempty: "t(k\\<^sub>+x) \ \" + by simp + +lemma pol_upd_deny_nonempty: "t(k\\<^sub>-x) \ \" + by simp + +lemma pol_upd_eqD1 : "m(a\\<^sub>+x) = n(a\\<^sub>+y) \ x = y" + by(auto dest: map_upd_eqD1) + +lemma pol_upd_eqD2 : "m(a\\<^sub>-x) = n(a\\<^sub>-y) \ x = y" + by(auto dest: map_upd_eqD1) + +lemma pol_upd_neq1 [simp]: "m(a\\<^sub>+x) \ n(a\\<^sub>-y)" + by(auto dest: map_upd_eqD1) + + +subsection{* Override Operators *} +text{* + Key operators for constructing policies are the override operators. There are four different + versions of them, with one of them being the override operator from the Map theory. As it is + common to compose policy rules in a ``left-to-right-first-fit''-manner, that one is taken as + default, defined by a syntax translation from the provided override operator from the Map + theory (which does it in reverse order). +*} + +syntax + "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "(+p/)" 100) + +syntax (xsymbols) + "_policyoverride" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\" 100) + +translations + "p \ q" \ "q ++ p" + + +text{* + Some elementary facts inherited from Map are: +*} + +lemma override_empty: "p \ \ = p" + by simp + +lemma empty_override: "\ \ p = p" + by simp + +lemma override_assoc: "p1 \ (p2 \ p3) = (p1 \ p2) \ p3" + by simp + +text{* + The following two operators are variants of the standard override. For override\_A, + an allow of wins over a deny. For override\_D, the situation is dual. +*} + +definition override_A :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_A" 100) +where "m2 ++_A m1 = + (\x. (case m1 x of + \allow a\ \ \allow a\ + | \deny a\ \ (case m2 x of \allow b\ \ \allow b\ + | _ \ \deny a\) + | \ \ m2 x) + )" + +syntax (xsymbols) + "_policyoverride_A" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>A" 100) + +translations + "p \\<^sub>A q" \ "p ++_A q" + +lemma override_A_empty[simp]: "p \\<^sub>A \ = p" + by(simp add:override_A_def) + +lemma empty_override_A[simp]: "\ \\<^sub>A p = p" + apply (rule ext) + apply (simp add:override_A_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac a) + apply (simp_all) +done + + +lemma override_A_assoc: "p1 \\<^sub>A (p2 \\<^sub>A p3) = (p1 \\<^sub>A p2) \\<^sub>A p3" + by (rule ext, simp add: override_A_def split: decision.splits option.splits) + +definition override_D :: "['\\'\, '\\'\] \ '\\'\" (infixl "++'_D" 100) +where "m1 ++_D m2 = + (\x. case m2 x of + \deny a\ \ \deny a\ + | \allow a\ \ (case m1 x of \deny b\ \ \deny b\ + | _ \ \allow a\) + | \ \ m1 x + )" + +syntax (xsymbols) + "_policyoverride_D" :: "['a \ 'b, 'a \ 'b] \ 'a \ 'b" (infixl "\\<^sub>D" 100) +translations + "p \\<^sub>D q" \ "p ++_D q" + +lemma override_D_empty[simp]: "p \\<^sub>D \ = p" + by(simp add:override_D_def) + +lemma empty_override_D[simp]: "\ \\<^sub>D p = p" + apply (rule ext) + apply (simp add:override_D_def) + apply (case_tac "p x", simp_all) + apply (case_tac a, simp_all) +done + +lemma override_D_assoc: "p1 \\<^sub>D (p2 \\<^sub>D p3) = (p1 \\<^sub>D p2) \\<^sub>D p3" + apply (rule ext) + apply (simp add: override_D_def split: decision.splits option.splits) +done + +subsection{* Coercion Operators *} +text{* + Often, especially when combining policies of different type, it is necessary to + adapt the input or output domain of a policy to a more refined context. 
+*} + +text{* + An analogous for the range of a policy is defined as follows: +*} + +definition policy_range_comp :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o'_f" 55) +where + "f o_f p = (\x. case p x of + \allow y\ \ \allow (f y)\ + | \deny y\ \ \deny (f y)\ + | \ \ \)" + +syntax (xsymbols) + "_policy_range_comp" :: "['\\'\, '\\'\] \ '\ \'\" (infixl "o\<^sub>f" 55) + +translations + "p o\<^sub>f q" \ "p o_f q" + +lemma policy_range_comp_strict : "f o\<^sub>f \ = \" + apply (rule ext) + apply (simp add: policy_range_comp_def) +done + + +text{* + A generalized version is, where separate coercion functions are applied to the result + depending on the decision of the policy is as follows: +*} + +definition range_split :: "[('\\'\)\('\\'\),'\ \ '\] \ '\ \ '\" + (infixr "\" 100) +where "(P) \ p = (\x. case p x of + \allow y\ \ \allow ((fst P) y)\ + | \deny y\ \ \deny ((snd P) y)\ + | \ \ \)" + +lemma range_split_strict[simp]: "P \ \ = \" + apply (rule ext) + apply (simp add: range_split_def) +done + + +lemma range_split_charn: + "(f,g) \ p = (\x. case p x of + \allow x\ \ \allow (f x)\ + | \deny x\ \ \deny (g x)\ + | \ \ \)" + apply (simp add: range_split_def) + apply (rule ext) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + +text{* + The connection between these two becomes apparent if considering the following lemma: +*} + +lemma range_split_vs_range_compose: "(f,f) \ p = f o\<^sub>f p" + by(simp add: range_split_charn policy_range_comp_def) + +lemma range_split_id [simp]: "(id,id) \ p = p" + apply (rule ext) + apply (simp add: range_split_charn id_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + +lemma range_split_bi_compose [simp]: "(f1,f2) \ (g1,g2) \ p = (f1 o g1,f2 o g2) \ p" + apply (rule ext) + apply (simp add: range_split_charn comp_def) + apply (case_tac "p x") + apply (simp_all) + apply (case_tac "a") + apply (simp_all) +done + + +text{* + The next three operators are rather exotic and in most cases not used. +*} + +text {* + The following is a variant of range\_split, where the change in the decision depends + on the input instead of the output. +*} + +definition dom_split2a :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\a" 100) +where "P \a p = (\x. case p x of + \allow y\ \ \allow (the ((fst P) x))\ + | \deny y\ \ \deny (the ((snd P) x))\ + | \ \ \)" + +definition dom_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ '\" (infixr "\" 100) +where "P \ p = (\x. case p x of + \allow y\ \ \allow ((fst P) x)\ + | \deny y\ \ \deny ((snd P) x)\ + | \ \ \)" + +definition range_split2 :: "[('\ \ '\) \ ('\ \'\),'\ \ '\] \ '\ \ ('\ \'\)" (infixr "\2" 100) +where "P \2 p = (\x. case p x of + \allow y\ \ \allow (y,(fst P) x)\ + | \deny y\ \ \deny (y,(snd P) x)\ + | \ \ \)" + +text{* + The following operator is used for transition policies only: a transition policy is transformed + into a state-exception monad. Such a monad can for example be used for test case generation using + HOL-Testgen~\cite{brucker.ea:theorem-prover:2012}. +*} + +definition policy2MON :: "('\\'\ \ 'o\'\) \ ('\ \('o decision,'\) MON\<^sub>S\<^sub>E)" +where "policy2MON p = (\ \ \. 
case p (\,\) of + \(allow (outs,\'))\ \ \(allow outs, \')\ + | \(deny (outs,\'))\ \ \(deny outs, \')\ + | \ \ \)" + +lemmas UPFCoreDefs = Allow_def Deny_def override_A_def override_D_def policy_range_comp_def + range_split_def dom_split2_def map_add_def restrict_map_def +end + diff --git a/document/root.tex b/document/root.tex index 7f9def7..254dc47 100644 --- a/document/root.tex +++ b/document/root.tex @@ -36,13 +36,6 @@ \urlstyle{rm} \isabellestyle{it} -\renewcommand{\isamarkupheader}[1]{\section{#1}} -\renewcommand{\isamarkupsection}[1]{\subsection{#1}} -\renewcommand{\isamarkupsubsection}[1]{\subsubsection{#1}} -\renewcommand{\isamarkupsubsubsection}[1]{\paragraph{#1}} -\renewcommand{\isamarkupsect}[1]{\subsection{#1}} -\renewcommand{\isamarkupsubsect}[1]{\susubsection{#1}} -\renewcommand{\isamarkupsubsubsect}[1]{\paragraph{#1}} \renewcommand{\isastyle}{\isastyleminor} \pagestyle{empty}
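
Note on the migration this patch performs: Isabelle2015 drops the old 'header' command, so every former header{* ... *} becomes a section{* ... *}, and the pre-existing sectioning commands are shifted down one level (section -> subsection, subsection -> subsubsection); the \renewcommand redefinitions of the Isabelle markup macros in document/root.tex thereby become redundant and are removed. Independently, the proof scripts stop relying on machine-generated case variable names (the implicit "\<alpha>" after case_tac) and on schematic variables in rule instantiations (?X), using rename_tac and "for"-fixed variables instead. The following self-contained sketch is an illustration of both patterns, not part of the patched theories: the theory name Scratch, the lemma, and the toy copy of the decision datatype and a flattening function are hypothetical stand-ins chosen to mirror UPFCore.thy and SeqComposition.thy.

theory Scratch
  imports Main
begin

section{* Properties on Policies *}       (* Isabelle2015: was header{* ... *} *)
subsection{* Basic Properties *}          (* shifted down: was section{* ... *} *)

(* Toy copy of the decision datatype from UPFCore.thy. *)
datatype 'a decision = allow 'a | deny 'a

(* A flattening function in the style of flat_orA/flat_orD from SeqComposition.thy. *)
fun flat :: "('a decision) decision \<Rightarrow> 'a decision"
where "flat (allow y) = y"
     |"flat (deny y)  = y"

(* Proof-script pattern used throughout the patch: name the case variable
   explicitly with rename_tac, then do the nested case split restricted to
   the current goal via [1], instead of referring to "\<alpha>". *)
lemma "flat x = allow y \<Longrightarrow> x = allow (allow y) \<or> x = deny (allow y)"
  apply (case_tac x)
   apply (rename_tac d)
   apply (case_tac d, simp_all)[1]
  apply (rename_tac d)
  apply (case_tac d, simp_all)[1]
  done

end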