Compare commits
69 Commits
aab92eadfd
...
809e485205
Author | SHA1 | Date |
---|---|---|
Achim D. Brucker | 809e485205 | |
Gerwin Klein | deb6cfada8 | |
Gerwin Klein | 9bf39e287b | |
Gerwin Klein | d9df1c5621 | |
Gerwin Klein | d5fcddecfd | |
Gerwin Klein | 77050f3034 | |
Gerwin Klein | fb2caf86bc | |
Michael McInerney | c178c09eca | |
Gerwin Klein | 1ea5d8b092 | |
Michael McInerney | e5895dbd64 | |
Michael McInerney | abc0990d4d | |
Michael McInerney | 7fdfee1a00 | |
Michael McInerney | 7775f42bc9 | |
Michael McInerney | 992aa5f657 | |
Michael McInerney | 4ac9ff06ec | |
Michael McInerney | 650771902d | |
Michael McInerney | 74d8f98e85 | |
Michael McInerney | f857b158a1 | |
Michael McInerney | d212944e50 | |
Michael McInerney | ff5e7da215 | |
Michael McInerney | b94a78c88c | |
Michael McInerney | ea943b5a6d | |
Michael McInerney | d4dd05ca30 | |
Michael McInerney | e160e257a0 | |
Michael McInerney | e508d15554 | |
Michael McInerney | 71255d25bf | |
Michael McInerney | f03be1244c | |
Michael McInerney | c840839ab7 | |
Michael McInerney | 375b19261a | |
Michael McInerney | 7493e71298 | |
Gerwin Klein | 3daad13f39 | |
Gerwin Klein | cddd42ae76 | |
Gerwin Klein | 2b17160c2b | |
Gerwin Klein | 28197a5b9e | |
Gerwin Klein | d2940797c4 | |
Gerwin Klein | 6cc6e1b52f | |
Gerwin Klein | e01e42943e | |
Michael McInerney | 8f0b505826 | |
Rafal Kolanski | 18640b8db6 | |
Gerwin Klein | 289bf94e2c | |
Gerwin Klein | ee7c8101da | |
Gerwin Klein | c2e964eb3c | |
Gerwin Klein | 7bf0cbd8f2 | |
Gerwin Klein | f1f1027125 | |
Gerwin Klein | a881c04a37 | |
Gerwin Klein | fdaec17711 | |
Gerwin Klein | f2a0f7cfc9 | |
Gerwin Klein | 550feb18dd | |
Gerwin Klein | 02928556ff | |
Gerwin Klein | 65dabc2a0e | |
Michael McInerney | 4d19f6616f | |
Michael McInerney | 5568eb56a1 | |
Michael McInerney | 5b618c7fe4 | |
Michael McInerney | e122ad9d92 | |
Corey Lewis | af3505401b | |
Gerwin Klein | ec1f38c8bc | |
Gerwin Klein | 5a5e5e363d | |
Rafal Kolanski | ab7fdfeebe | |
Rafal Kolanski | 2f6771cb50 | |
Rafal Kolanski | a22c624031 | |
Gerwin Klein | f089db3448 | |
Gerwin Klein | 5b5fb045d8 | |
Gerwin Klein | 1182415f0c | |
Gerwin Klein | cd40ce33a3 | |
Gerwin Klein | 9e06a820bf | |
Gerwin Klein | ab8b7d2e4d | |
Gerwin Klein | 35256907e9 | |
Gerwin Klein | eb65c07d67 | |
Achim D. Brucker | 3f12bcde49 |
|
@ -37,6 +37,7 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64]
|
||||
num_domains: ['1', '']
|
||||
# test only most recent push:
|
||||
concurrency: l4v-regression-${{ github.ref }}-${{ strategy.job-index }}
|
||||
steps:
|
||||
|
@ -45,6 +46,7 @@ jobs:
|
|||
with:
|
||||
L4V_ARCH: ${{ matrix.arch }}
|
||||
xml: ${{ needs.code.outputs.xml }}
|
||||
NUM_DOMAINS: ${{ matrix.num_domains }}
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
@ -58,7 +60,7 @@ jobs:
|
|||
- name: Upload logs
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: logs-${{ matrix.arch }}
|
||||
name: logs-${{ matrix.num_domains }}-${{ matrix.arch }}
|
||||
path: logs.tar.xz
|
||||
|
||||
deploy:
|
||||
|
@ -77,6 +79,13 @@ jobs:
|
|||
token: ${{ secrets.PRIV_REPO_TOKEN }}
|
||||
tag: "l4v/proof-deploy/${{ github.event_name }}"
|
||||
|
||||
# Automatically rebase platform branches on pushes to master.
|
||||
# This workflow here on the master branch attempts a git rebase of the platform
|
||||
# branches listed in the build matrix below. If the rebase succeeds, the rebased
|
||||
# branch is pushed under the name `<branch>-rebased`. This triggers the build
|
||||
# workflow on the `<branch>-rebased` branch, which will run the proofs. If the
|
||||
# proofs succeed, the `<branch>-rebased` branch is force-pushed over
|
||||
# `<branch>`, becoming the new platform branch.
|
||||
rebase:
|
||||
name: Rebase platform branches
|
||||
runs-on: ubuntu-latest
|
||||
|
|
|
@ -18,11 +18,13 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64]
|
||||
num_domains: ['1', '7', '']
|
||||
steps:
|
||||
- name: Proofs
|
||||
uses: seL4/ci-actions/aws-proofs@master
|
||||
with:
|
||||
L4V_ARCH: ${{ matrix.arch }}
|
||||
NUM_DOMAINS: ${{ matrix.num_domains }}
|
||||
cache_read: '' # start with empty cache, but write cache back (default)
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
|
@ -38,7 +40,7 @@ jobs:
|
|||
- name: Upload logs
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: logs-${{ matrix.arch }}
|
||||
name: logs-${{ matrix.num_domains }}-${{ matrix.arch }}
|
||||
path: logs.tar.xz
|
||||
|
||||
binary-verification:
|
||||
|
|
|
@ -28,8 +28,8 @@ assistant [Isabelle/HOL][2]. For an introduction to Isabelle, see its
|
|||
[official website][2] and [documentation][3].
|
||||
|
||||
[1]: https://github.com/seL4/l4v "L4.verified Repository"
|
||||
[2]: http://isabelle.in.tum.de "Isabelle Website"
|
||||
[3]: http://isabelle.in.tum.de/documentation.html "Isabelle Documentation"
|
||||
[2]: https://isabelle.in.tum.de "Isabelle Website"
|
||||
[3]: https://isabelle.in.tum.de/documentation.html "Isabelle Documentation"
|
||||
|
||||
<a name="setup"></a>
|
||||
Setup
|
||||
|
|
|
@ -0,0 +1,392 @@
|
|||
<!--
|
||||
Copyright 2023, Proofcraft Pty Ltd
|
||||
|
||||
SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
-->
|
||||
|
||||
# Commit Messages
|
||||
|
||||
## Objective
|
||||
|
||||
The l4v repository is relatively large, active, and long-lived. It has a public
|
||||
history of about one decade, and an additional decade of unreleased private
|
||||
history. It will hopefully live on for another 20 years.
|
||||
|
||||
This means that the commit history is important. Examples of questions the commit
|
||||
history should be able to answer reasonably quickly and painlessly are:
|
||||
|
||||
- > Is this written in a strange way for good reasons, or were we just in a hurry?
|
||||
> What was the reason? Does it still apply?
|
||||
|
||||
- > When did we change doing things this way and why?
|
||||
|
||||
- > Has this always been broken or was there a seemingly unrelated change that broke it?
|
||||
|
||||
- > How long did it take to finish this proof?
|
||||
|
||||
- > How much change was necessary to do this proof?
|
||||
|
||||
- > Where did this library lemma come from?
|
||||
|
||||
## General
|
||||
|
||||
The [seL4 repository guidelines][git-conventions] apply to the `l4v` repository,
|
||||
with the following exceptions and additions:
|
||||
|
||||
- header can be max 78 characters
|
||||
- body is wrapped at 78 characters
|
||||
- we use tags in the header to indicate which part of the repository
|
||||
the commit applies to
|
||||
|
||||
We are using 78 for the header because of the tags, which take up some space. If
|
||||
you can manage to stay within 50 characters anyway, that is appreciated, but it's
|
||||
not always practical. Using a body wrap of 72 is also allowed, since that is the
|
||||
default for other seL4 repositories.
|
||||
|
||||
We use tags, because the repository is relatively large and most commits only
|
||||
apply to small parts of it. The tags make it easy to identify relevance of a
|
||||
commit at a glance while scanning through a large number of commits.
|
||||
|
||||
The general guidelines prescribe capitalising the commit header. We do not
|
||||
capitalise the tag or after the tag, but we do capitalise the (rare) cases where
|
||||
there is no tag in the commit header.
|
||||
|
||||
## Header and Body
|
||||
|
||||
There is good general advice on [commit message writing][commit-messages]
|
||||
available on the internet and it is as relevant to proofs as it is to code.
|
||||
You should read it, it's not long and it's good advice.
|
||||
|
||||
Repeating some high-level points:
|
||||
|
||||
- Use imperative voice (e.g. `proof: rename lemma foo to bar`)
|
||||
- The header should be a short summary, focusing on "what"
|
||||
- The body should explain what is going on in more detail (also in imperative
|
||||
voice), but much more importantly *why* it is going on (is `bar` more
|
||||
consistent than `foo`? Is the name `foo` needed for something else? Does `bar`
|
||||
just better describe what is going on?).
|
||||
- You are trying to explain things to your future self or a future colleague
|
||||
5-10 years from now. You can assume knowledge of the repository in general,
|
||||
but you should not assume specific context that is obvious to you right now,
|
||||
but that will not be known to a different person 5 years from now.
|
||||
|
||||
## Tags
|
||||
|
||||
- We use tags to indicate which part of the repository the commit applies to,
|
||||
and if it is architecture-specific, then to which architecture it applies to.
|
||||
|
||||
- We do not usually use branch tags, because git branches are ephemeral and we
|
||||
are using rebase branches for most of our work. The one exception is the `rt`
|
||||
branch, which has been alive for over half a decade. For this branch, we allow
|
||||
merge commits (from branch `master` into `rt` only), and we want to be able to
|
||||
reconstruct a rebase history from that at the end of the branch's life.
|
||||
|
||||
This means, we do use the tag `rt` for commits that only make sense on this
|
||||
branch. If you could apply the commit to the master branch directly (e.g. you're
|
||||
adding a lemma to a library), it should not get the tag. Otherwise it should.
|
||||
|
||||
### Tag Examples
|
||||
|
||||
The main tags we use are mostly the directory names of the main proof something
|
||||
is in, e.g. `refine`, `crefine`, `sys-init`, `camkes`. For some of these, there
|
||||
are abbreviations, mainly `aspec` for the abstract spec and `ainvs` for the
|
||||
abstract invariants.
|
||||
|
||||
For large directories that have logical sub parts, we use slashes, e.g.
|
||||
`lib/monads`. Not so much because the change is in that directory, but because
|
||||
we want to see that it's a library change and applies to the monad part of the
|
||||
library.
|
||||
|
||||
If the change applies to many proofs, for instance large lemma renaming commits,
|
||||
we use tags such as `proof` and `spec`.
|
||||
|
||||
We combine tags with `+` if a change applies to multiple parts, e.g.
|
||||
`clib+crefine` or `lib+proof`.
|
||||
|
||||
If something is specific to an architecture we preface the tag with the
|
||||
architecture, e.g. `arm refine:` or `aarch64 aspec+ainvs:`. The current
|
||||
architecture tags are: `arm`, `arm-hyp`, `x64`, `riscv`, `aarch64`.
|
||||
Please use these spellings only.
|
||||
|
||||
More tag examples:
|
||||
|
||||
- `trivial`: for small trivial changes like fixing a typo, where no proofs or
|
||||
specs have changed, i.e. that would not need a proof CI run.
|
||||
- `cleanup:` for cleanups that do not change proof content
|
||||
- `github:` for GitHub CI changes
|
||||
- `run_tests`: for changes to the top-level `run_tests` file
|
||||
- `isabelle20xx`: for easily identifying commits related to Isabelle updates
|
||||
|
||||
For more consistency there is an order between tags. More abstract/general
|
||||
things should come first, e.g. `lib` < `aspec` < `haskell` < `ainvs` < `refine`
|
||||
< `orphanage` < `crefine`. Or `dspec` < `drefine` and `access` < `infoflow`. Specs
|
||||
< proofs and `infoflow` < refinement proofs. This is not a total order, it's Ok
|
||||
to use your own judgement.
|
||||
|
||||
Because `lib` has many subdirectories and separate parts, it's fine to use
|
||||
session names as tags there to shorten things a bit, e.g. `clib`, `monads`,
|
||||
`word_lib` instead of `lib/clib`, `lib/monads`, or `lib/word_lib`. This makes
|
||||
sense when the tags are session names.
|
||||
|
||||
See also the longer example list of [good tags](#good-tags) below.
|
||||
|
||||
## Tips and Tools
|
||||
|
||||
### Looking at history
|
||||
|
||||
The main tools to interact with the git history are browsing it on GitHub and
|
||||
various forms of `git log`:
|
||||
|
||||
```sh
|
||||
git log --oneline # show only headings
|
||||
git log # show commit info with author, date, message
|
||||
git log --stat # additionally show which files have changed
|
||||
git log -p # additionally show full diff
|
||||
```
|
||||
|
||||
For all of these, you can supply a path to restrict the log to a certain file
|
||||
or directory in the repo. You can also supply a branch, or a commit range like
|
||||
`master..rt` to restrict the output.
|
||||
|
||||
Especially `git log --oneline` is useful for quickly getting an overview. Example
|
||||
output:
|
||||
|
||||
```
|
||||
b3c6df48a clib: improve ccorres_While
|
||||
49ff8457f clib+crefine: improve and consolidate variants of ccorres_to_vcg
|
||||
8c433c085 clib: add some rules for hoarep
|
||||
82b954817 clib: improve ccorres_call_getter_setter
|
||||
8c59df449 lib/monads: remove more uses of _tac methods
|
||||
563232397 run_tests+proof: exclude SimplExportAndRefine for AARCH64
|
||||
1cce5b3ff proof: switch AArch64 quick_and_dirty from Refine to CRefine
|
||||
402a8342d run_tests: enable CBaseRefine for AARCH64
|
||||
32a672412 aarch64 cspec: add Kernel_C.thy to base CKernel image on
|
||||
b2cd1ce4a aarch64 asmrefine: copy ArchSetup from RISCV64
|
||||
67c0109b7 lib/monads: avoid clarsimp as initial Isar method
|
||||
bd5026438 lib/monads: fix remaining document preparation issues
|
||||
4d0086567 lib/monads: add new Trace_* files to ROOT
|
||||
598e19dd6 lib/monads: coherent document structure
|
||||
4cbfb4ab0 lib/monads: minor style + warning cleanup
|
||||
b2dd5db6d lib/monads: fix document preparation issues
|
||||
03a045309 lib/monads: add AFP document setup
|
||||
d0bab9c79 misc/scripts: remove Darwin cpp wrapper
|
||||
```
|
||||
|
||||
You can very quickly see that C verification has been active recently, that
|
||||
new tests were added, that AARCH64 refinement proofs have been done, and that there was
|
||||
some work to do with the AFP and the monad library. You can see that nothing
|
||||
has happened with the system initialiser or other user-level proofs, and that there
|
||||
are no changes that should affect, for instance, the C parser.
|
||||
|
||||
You only see such things quickly when the messages are consistent and follow the
|
||||
same kind of pattern. It's not so important what the pattern is. It is important
|
||||
that it is consistent.
|
||||
|
||||
Note in e.g. `proof: switch AArch64 quick_and_dirty from Refine to CRefine` that
|
||||
the architecture tag is used only when the change is specific to files for that
|
||||
architecture. In this commit, the overall ROOTS file is changed, so it shouldn't
|
||||
get the architecture tag.
|
||||
|
||||
### What tag should I pick?
|
||||
|
||||
If you're uncertain what tag to pick for a certain file or directory, the
|
||||
easiest way to figure it out is to do
|
||||
|
||||
```sh
|
||||
git log --oneline <that-file>
|
||||
```
|
||||
|
||||
Do your tags the same way people have done before. This will make the pattern
|
||||
consistent and should be reasonably easy to read even if it's not perfect. Look
|
||||
at a few commits, not only a single one, so you can course correct instead of
|
||||
amplify if someone happened to invent a new flavour.
|
||||
|
||||
You can even do that when you're in the middle of writing a commit message, it's
|
||||
safe to interrupt `git commit` with `Ctrl-Z`, then `bg` in your shell to put
|
||||
it into the background, and then `git log --oneline <file>` to see the history.
|
||||
|
||||
Any operation that doesn't change the state of the repository is fine (even
|
||||
those that do are fine, but then the commit will probably fail).
|
||||
|
||||
When you're looking into history for tags, use mainly commits from roughly 2018
|
||||
onwards. We weren't very consistent with tags before that. The more recent the
|
||||
more consistent.
|
||||
|
||||
### Good tags
|
||||
|
||||
Here's a list of tags that have been used in the past and that follow the
|
||||
guidelines above.
|
||||
|
||||
```
|
||||
aarch64 ainvs
|
||||
aarch64 ainvs+refine
|
||||
aarch64 asmrefine
|
||||
aarch64 aspec
|
||||
aarch64 aspec+ainvs
|
||||
aarch64 aspec+design
|
||||
aarch64 aspec+haskell
|
||||
aarch64 aspec+machine
|
||||
aarch64 cspec
|
||||
aarch64 design
|
||||
aarch64 design+machine
|
||||
aarch64 haskell
|
||||
aarch64 haskell+design
|
||||
aarch64 haskell+machine
|
||||
aarch64 machine+ainvs
|
||||
aarch64 proof
|
||||
aarch64 refine
|
||||
aarch64 spec+haskell
|
||||
access+infoflow+drefine
|
||||
access+infoflow+crefine+drefine
|
||||
ainvs
|
||||
ainvs+crefine
|
||||
ainvs+refine
|
||||
arm aspec+design
|
||||
arm access
|
||||
arm access+infoflow
|
||||
arm ainvs
|
||||
arm aspec
|
||||
arm crefine
|
||||
arm haskell
|
||||
arm infoflow
|
||||
arm refine
|
||||
arm+arm-hyp crefine
|
||||
arm+arm-hyp machine
|
||||
arm-hyp aspec
|
||||
arm-hyp aspec+design
|
||||
arm-hyp ainvs
|
||||
arm-hyp crefine
|
||||
arm-hyp design
|
||||
arm-hyp haskell
|
||||
arm-hyp haskell+refine
|
||||
arm-hyp machine
|
||||
arm-hyp refine
|
||||
asmrefine
|
||||
aspec
|
||||
aspec+access
|
||||
aspec+ainvs
|
||||
aspec+design+haskell
|
||||
aspec+haskell
|
||||
autocorres
|
||||
bisim
|
||||
c-parser
|
||||
c-parser+autocorres
|
||||
c-parser+crefine
|
||||
camkes
|
||||
capDL
|
||||
ckernel
|
||||
cleanup
|
||||
cleanup ainvs
|
||||
clib
|
||||
clib+crefine
|
||||
crefine
|
||||
crefine+capDL
|
||||
cspec
|
||||
design
|
||||
docs
|
||||
dpolicy
|
||||
drefine
|
||||
dspec
|
||||
dspec+drefine+infoflow
|
||||
github
|
||||
haskell
|
||||
haskell+design
|
||||
haskell-translator
|
||||
infoflow
|
||||
infoflow+crefine
|
||||
infoflow+dpolicy+cdl-refine
|
||||
isabelle-2021
|
||||
isabelle-2021 access
|
||||
isabelle-2021 c-parser
|
||||
lib
|
||||
lib+READMEs
|
||||
lib+aarch64 ainvs
|
||||
lib+aarch64 refine
|
||||
lib+ainvs
|
||||
lib+ainvs+aarch64 ainvs
|
||||
lib+ainvs+aarch64 refine
|
||||
lib+ainvs+access+refine
|
||||
lib+autocorres
|
||||
lib+c-parser
|
||||
lib+crefine
|
||||
lib+proof
|
||||
lib+proof+autocorres
|
||||
lib+proof+tools
|
||||
lib+proof
|
||||
lib+refine+crefine
|
||||
lib+spec
|
||||
lib+spec+proof+autocorres
|
||||
lib+spec+proof
|
||||
lib+sysinit
|
||||
lib+tools
|
||||
lib/apply_debug
|
||||
lib/clib
|
||||
lib/corres_method
|
||||
lib/crunch
|
||||
lib/monads
|
||||
lib/sep_algebra
|
||||
license
|
||||
misc
|
||||
misc/regression
|
||||
misc/scripts
|
||||
misc/stats
|
||||
proof
|
||||
proof+autocorres
|
||||
proof/Makefile
|
||||
proof/ROOT
|
||||
refine
|
||||
refine cleanup
|
||||
refine+crefine
|
||||
refine+orphanage
|
||||
riscv
|
||||
riscv access
|
||||
riscv ainvs
|
||||
riscv ainvs+access
|
||||
riscv aspec
|
||||
riscv aspec+ainvs
|
||||
riscv aspec+haskell
|
||||
riscv crefine
|
||||
riscv cspec+crefine
|
||||
riscv design
|
||||
riscv haskell
|
||||
riscv haskell+design
|
||||
riscv haskell+proof
|
||||
riscv haskell+refine
|
||||
riscv infoflow
|
||||
riscv machine
|
||||
riscv machine+ainvs
|
||||
riscv machine+design+crefine
|
||||
riscv orphanage
|
||||
riscv platform
|
||||
riscv refine
|
||||
riscv access+infoflow+refine+crefine
|
||||
riscv spec
|
||||
riscv+aarch64 ainvs+refine
|
||||
riscv+x64 crefine
|
||||
riscv64+aarch64 ainvs
|
||||
run_tests
|
||||
run_tests+proof
|
||||
spec+proof
|
||||
style
|
||||
sys-init
|
||||
tools
|
||||
tools/asmrefine
|
||||
trivial
|
||||
word_lib
|
||||
word_lib+crefine
|
||||
x64 ainvs+refine+crefine
|
||||
x64 aspec
|
||||
x64 crefine
|
||||
x64 design
|
||||
x64 design/skel
|
||||
x64 haskell
|
||||
x64 machine
|
||||
x64 refine
|
||||
x64 refine+crefine
|
||||
```
|
||||
|
||||
Most of these could be prefixed with `rt` if they only made sense on the `rt`
|
||||
branch, e.g. `rt arm ainvs+refine:`
|
||||
|
||||
[git-conventions]: https://docs.sel4.systems/processes/git-conventions.html
|
||||
[commit-messages]: https://chris.beams.io/posts/git-commit/
|
|
@ -10,7 +10,7 @@ This command is for searching for theorems. If you are looking for a
|
|||
constant/function instead, look at [find_consts](find-consts.md).
|
||||
|
||||
There is an introduction to the `find_theorems` command in the
|
||||
[Isabelle/HOL tutorial](http://isabelle.in.tum.de/documentation.html).
|
||||
[Isabelle/HOL tutorial](https://isabelle.in.tum.de/documentation.html).
|
||||
Here we cover some additional material and useful patterns.
|
||||
|
||||
`find_theorems` can be written in the theory as a diagnostic command, or
|
||||
|
|
|
@ -243,4 +243,4 @@ proof-context aware than the 'original' indenter. Pressing `ctrl+i` while some
|
|||
subgoal depth and maintaining the relative indentation of multi-line `apply`
|
||||
statements.
|
||||
|
||||
[isabelle]: http://isabelle.in.tum.de
|
||||
[isabelle]: https://isabelle.in.tum.de
|
||||
|
|
|
@ -481,6 +481,18 @@ lemma corres_if3:
|
|||
(if G then a else b) (if G' then c else d)"
|
||||
by simp
|
||||
|
||||
lemma corres_if_strong:
|
||||
"\<lbrakk>\<And>s s'. \<lbrakk>(s, s') \<in> sr; R s; R' s'\<rbrakk> \<Longrightarrow> G = G';
|
||||
\<lbrakk>G; G'\<rbrakk> \<Longrightarrow> corres_underlying sr nf nf' r P P' a c;
|
||||
\<lbrakk>\<not> G; \<not> G'\<rbrakk> \<Longrightarrow> corres_underlying sr nf nf' r Q Q' b d \<rbrakk>
|
||||
\<Longrightarrow> corres_underlying sr nf nf' r
|
||||
(R and (if G then P else Q)) (R' and (if G' then P' else Q'))
|
||||
(if G then a else b) (if G' then c else d)"
|
||||
by (fastforce simp: corres_underlying_def)
|
||||
|
||||
lemmas corres_if_strong' =
|
||||
corres_if_strong[where R=R and P=R and Q=R for R,
|
||||
where R'=R' and P'=R' and Q'=R' for R', simplified]
|
||||
|
||||
text \<open>Some equivalences about liftM and other useful simps\<close>
|
||||
|
||||
|
@ -740,6 +752,11 @@ lemma corres_assert_assume:
|
|||
by (auto simp: bind_def assert_def fail_def return_def
|
||||
corres_underlying_def)
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres_underlying sr nf nf' rrel P Q (f ()) g
|
||||
\<Longrightarrow> corres_underlying sr nf nf' rrel (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_gen_asm_cross:
|
||||
"\<lbrakk> \<And>s s'. \<lbrakk>(s, s') \<in> sr; P' s; Q' s'\<rbrakk> \<Longrightarrow> A;
|
||||
A \<Longrightarrow> corres_underlying sr nf nf' r P Q f (g ()) \<rbrakk>
|
||||
|
|
|
@ -17,6 +17,17 @@ imports
|
|||
Local_Method
|
||||
begin
|
||||
|
||||
section \<open>Warnings\<close>
|
||||
|
||||
method_setup not_visible =
|
||||
\<open>Method.text_closure >> (fn m => fn ctxt => fn facts =>
|
||||
let
|
||||
val ctxt' = Context_Position.set_visible false ctxt
|
||||
fun tac st' = method_evaluate m ctxt' facts st'
|
||||
in SIMPLE_METHOD tac facts end)\<close>
|
||||
\<open>set context visibility to false for suppressing warnings in method execution\<close>
|
||||
|
||||
|
||||
section \<open>Debugging methods\<close>
|
||||
|
||||
method print_concl = (match conclusion in P for P \<Rightarrow> \<open>print_term P\<close>)
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
theory ExtraCorres
|
||||
imports Corres_UL
|
||||
imports Corres_UL DetWPLib
|
||||
begin
|
||||
|
||||
(* FIXME: the S in this rule is mainly to make the induction work, we don't actually need it in
|
||||
|
@ -181,12 +181,17 @@ qed
|
|||
|
||||
text \<open>Some results concerning the interaction of abstract and concrete states\<close>
|
||||
|
||||
definition ex_abs_underlying :: "('a \<times> 'b) set \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> 'b \<Rightarrow> bool" where
|
||||
"ex_abs_underlying sr P s' \<equiv> \<exists>s. (s,s') \<in> sr \<and> P s"
|
||||
|
||||
lemma ex_absI[intro!]:
|
||||
"(s, s') \<in> sr \<Longrightarrow> P s \<Longrightarrow> ex_abs_underlying sr P s'"
|
||||
by (auto simp add: ex_abs_underlying_def)
|
||||
|
||||
lemma corres_u_nofail:
|
||||
"corres_underlying S nf True r P P' f g \<Longrightarrow> (nf \<Longrightarrow> no_fail P f) \<Longrightarrow>
|
||||
no_fail (\<lambda>s'. \<exists>s. (s,s') \<in> S \<and> P s \<and> P' s') g"
|
||||
apply (clarsimp simp add: corres_underlying_def no_fail_def)
|
||||
apply fastforce
|
||||
done
|
||||
"\<lbrakk>corres_underlying S nf True r P P' f g; nf \<Longrightarrow> no_fail P f\<rbrakk>
|
||||
\<Longrightarrow> no_fail (P' and ex_abs_underlying S P) g"
|
||||
by (fastforce simp: corres_underlying_def ex_abs_underlying_def no_fail_def)
|
||||
|
||||
lemma wp_from_corres_u:
|
||||
"\<lbrakk> corres_underlying R nf nf' r G G' f f'; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P'\<rbrace> f' \<lbrace>Q'\<rbrace>; nf \<Longrightarrow> no_fail P f \<rbrakk> \<Longrightarrow>
|
||||
|
@ -204,7 +209,7 @@ lemma wp_from_corres_u_unit:
|
|||
lemma corres_nofail:
|
||||
"corres_underlying state_relation nf True r P P' f g \<Longrightarrow> (nf \<Longrightarrow> no_fail P f) \<Longrightarrow>
|
||||
no_fail (\<lambda>s'. \<exists>s. (s,s') \<in> state_relation \<and> P s \<and> P' s') g"
|
||||
by (rule corres_u_nofail)
|
||||
by (fastforce intro: no_fail_pre corres_u_nofail simp: ex_abs_underlying_def)
|
||||
|
||||
lemma wp_from_corres_unit:
|
||||
"\<lbrakk> corres_underlying state_relation nf nf' r G G' f f';
|
||||
|
@ -213,13 +218,6 @@ lemma wp_from_corres_unit:
|
|||
f' \<lbrace>\<lambda>_ s'. \<exists>s. (s,s') \<in> state_relation \<and> Q s \<and> Q' s'\<rbrace>"
|
||||
by (auto intro!: wp_from_corres_u_unit)
|
||||
|
||||
definition ex_abs_underlying :: "('a \<times> 'b) set \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> 'b \<Rightarrow> bool" where
|
||||
"ex_abs_underlying sr P s' \<equiv> \<exists>s. (s,s') \<in> sr \<and> P s"
|
||||
|
||||
lemma ex_absI[intro!]:
|
||||
"(s, s') \<in> sr \<Longrightarrow> P s \<Longrightarrow> ex_abs_underlying sr P s'"
|
||||
by (auto simp add: ex_abs_underlying_def)
|
||||
|
||||
lemma corres_underlying_split_ex_abs:
|
||||
assumes ac: "corres_underlying srel nf nf' r' G G' a c"
|
||||
assumes bd: "\<forall>rv rv'. r' rv rv' \<longrightarrow>
|
||||
|
@ -251,6 +249,100 @@ lemma hoare_from_abs_inv:
|
|||
using assms
|
||||
by (fastforce intro: hoare_from_abs[where R=\<top> and S=\<top> and R'=\<top> and Q="\<lambda>_. P" , simplified])
|
||||
|
||||
lemma corres_from_valid:
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying srel P) f'"
|
||||
assumes
|
||||
"\<And>s. \<lbrace>\<lambda>s'. P s \<and> P' s' \<and> (s, s') \<in> srel\<rbrace>
|
||||
f' \<lbrace>\<lambda>rv' t'. \<exists>(rv, t) \<in> fst (f s). (t, t') \<in> srel \<and> rrel rv rv'\<rbrace>"
|
||||
shows "corres_underlying srel nf nf' rrel P P' f f'"
|
||||
using assms
|
||||
by (fastforce simp: corres_underlying_def valid_def no_fail_def)
|
||||
|
||||
lemma corres_from_valid_det:
|
||||
assumes det: "det_wp P f"
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying srel P) f'"
|
||||
assumes valid:
|
||||
"\<And>s rv t.
|
||||
\<lbrakk>fst (f s) = {(rv, t)}; P s\<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>\<lambda>s'. P' s' \<and> (s, s') \<in> srel\<rbrace> f' \<lbrace>\<lambda>rv' t'. (t, t') \<in> srel \<and> rrel rv rv'\<rbrace>"
|
||||
shows "corres_underlying srel nf nf' rrel P P' f f'"
|
||||
apply (clarsimp simp: corres_underlying_def)
|
||||
apply (intro conjI)
|
||||
apply (insert det)
|
||||
apply (clarsimp simp: det_wp_def)
|
||||
apply (force dest: use_valid[OF _ valid])
|
||||
apply (fastforce dest: nf' simp: no_fail_def ex_abs_underlying_def)
|
||||
done
|
||||
|
||||
lemma corres_noop_ex_abs:
|
||||
assumes P: "\<And>s. P s \<Longrightarrow> \<lbrace>\<lambda>s'. (s, s') \<in> sr \<and> P' s'\<rbrace> f \<lbrace>\<lambda>rv s'. (s, s') \<in> sr \<and> r x rv\<rbrace>"
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying sr P) f"
|
||||
shows "corres_underlying sr nf nf' r P P' (return x) f"
|
||||
apply (simp add: corres_underlying_def return_def)
|
||||
apply clarsimp
|
||||
apply (frule P)
|
||||
apply (insert nf')
|
||||
apply (fastforce simp: valid_def no_fail_def ex_abs_underlying_def)
|
||||
done
|
||||
|
||||
lemma corres_symb_exec_r_conj_ex_abs:
|
||||
assumes z: "\<And>rv. corres_underlying sr nf nf' r Q (R' rv) x (y rv)"
|
||||
assumes y: "\<lbrace>Q'\<rbrace> m \<lbrace>R'\<rbrace>"
|
||||
assumes x: "\<And>s. Q s \<Longrightarrow> \<lbrace>\<lambda>s'. (s, s') \<in> sr \<and> P' s'\<rbrace> m \<lbrace>\<lambda>rv s'. (s, s') \<in> sr\<rbrace>"
|
||||
assumes nf: "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying sr Q) m"
|
||||
shows "corres_underlying sr nf nf' r Q (P' and Q') x (m >>= (\<lambda>rv. y rv))"
|
||||
proof -
|
||||
have P: "corres_underlying sr nf nf' dc Q P' (return undefined) m"
|
||||
apply (rule corres_noop_ex_abs)
|
||||
apply (simp add: x)
|
||||
apply (erule nf)
|
||||
done
|
||||
show ?thesis
|
||||
apply (rule corres_guard_imp)
|
||||
apply (subst return_bind[symmetric], rule corres_split[OF P])
|
||||
apply (rule z)
|
||||
apply wp
|
||||
apply (rule y)
|
||||
apply simp+
|
||||
done
|
||||
qed
|
||||
|
||||
lemmas corres_symb_exec_r_conj_ex_abs_forwards =
|
||||
corres_symb_exec_r_conj_ex_abs[where P'=P' and Q'=P' for P', simplified]
|
||||
|
||||
lemma gets_the_corres_ex_abs':
|
||||
"\<lbrakk>no_ofail P a; no_ofail (P' and ex_abs_underlying sr P) b\<rbrakk> \<Longrightarrow>
|
||||
corres_underlying sr False True r P P' (gets_the a) (gets_the b)
|
||||
= (\<forall>s s'. P s \<and> P' s' \<and> (s, s') \<in> sr \<longrightarrow> r (the (a s)) (the (b s')))"
|
||||
by (fastforce simp: gets_the_def no_ofail_def corres_underlying_def split_def exec_gets
|
||||
assert_opt_def fail_def return_def ex_abs_underlying_def)
|
||||
|
||||
lemmas gets_the_corres_ex_abs = gets_the_corres_ex_abs'[THEN iffD2]
|
||||
|
||||
lemma gets_the_corres':
|
||||
"\<lbrakk>no_ofail P a; no_ofail P' b\<rbrakk> \<Longrightarrow>
|
||||
corres_underlying sr False True r P P' (gets_the a) (gets_the b)
|
||||
= (\<forall>s s'. P s \<and> P' s' \<and> (s, s') \<in> sr \<longrightarrow> r (the (a s)) (the (b s')))"
|
||||
apply (erule gets_the_corres_ex_abs')
|
||||
apply (fastforce intro: no_ofail_pre_imp)
|
||||
done
|
||||
|
||||
lemmas gets_the_corres = gets_the_corres'[THEN iffD2]
|
||||
|
||||
lemma gets_the_corres_relation:
|
||||
"\<lbrakk>no_ofail P f; corres_underlying sr False True r P P' (gets_the f) (gets_the f');
|
||||
P s; P' s'; (s, s') \<in> sr\<rbrakk>
|
||||
\<Longrightarrow> r (the (f s)) (the (f' s'))"
|
||||
apply (rule_tac P=P and a=f and b=f' and P'=P'
|
||||
in gets_the_corres_ex_abs'[THEN iffD1, rule_format];
|
||||
fastforce?)
|
||||
apply (rule no_ofail_gets_the_eq[THEN iffD2])
|
||||
apply (fastforce intro: corres_u_nofail)
|
||||
done
|
||||
|
||||
|
||||
\<comment> \<open>Some @{term corres_underlying} rules for @{term whileLoop}\<close>
|
||||
|
||||
lemma in_whileLoop_corres:
|
||||
assumes body_corres:
|
||||
"\<And>r r'. rrel r r' \<Longrightarrow>
|
||||
|
@ -422,6 +514,10 @@ lemma ifM_corres:
|
|||
apply (wpsimp wp: abs_valid conc_valid hoare_vcg_if_lift2)+
|
||||
done
|
||||
|
||||
lemmas ifM_corres' =
|
||||
ifM_corres[where A=A and B=A and C=A for A, simplified,
|
||||
where A'=A' and B'=A' and C'=A' for A', simplified]
|
||||
|
||||
lemma orM_corres:
|
||||
"\<lbrakk>corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) R R' b b';
|
||||
\<lbrace>B\<rbrace> a \<lbrace>\<lambda>c s. \<not> c \<longrightarrow> R s\<rbrace>; \<lbrace>B'\<rbrace> a' \<lbrace>\<lambda>c s. \<not> c \<longrightarrow> R' s\<rbrace>\<rbrakk>
|
||||
|
@ -432,6 +528,9 @@ lemma orM_corres:
|
|||
apply (wpsimp | fastforce)+
|
||||
done
|
||||
|
||||
lemmas orM_corres' =
|
||||
orM_corres[where A=A and B=A for A, simplified, where A'=A' and B'=A' for A', simplified]
|
||||
|
||||
lemma andM_corres:
|
||||
"\<lbrakk>corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) Q Q' b b';
|
||||
\<lbrace>B\<rbrace> a \<lbrace>\<lambda>c s. c \<longrightarrow> Q s\<rbrace>; \<lbrace>B'\<rbrace> a' \<lbrace>\<lambda>c s. c \<longrightarrow> Q' s\<rbrace>\<rbrakk>
|
||||
|
@ -451,4 +550,8 @@ lemma notM_corres:
|
|||
apply wpsimp+
|
||||
done
|
||||
|
||||
lemma ifM_to_top_of_bind:
|
||||
"((ifM test true false) >>= z) = ifM test (true >>= z) (false >>= z)"
|
||||
by (force simp: ifM_def bind_def split: if_splits)
|
||||
|
||||
end
|
||||
|
|
|
@ -0,0 +1,479 @@
|
|||
(*
|
||||
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
(* Singly-linked lists on heaps or projections from heaps, as predicate and as recursive function.
|
||||
Loosely after ~~/src/HOL/Hoare/Pointer_Examples.thy *)
|
||||
|
||||
theory Heap_List
|
||||
imports Main "HOL-Library.Prefix_Order" ListLibLemmas
|
||||
begin
|
||||
|
||||
(* Given a heap projection that returns the next-pointer for an object at address x,
|
||||
given a start pointer x, and an end pointer y, determine if the given list
|
||||
is the path of addresses visited when following the next-pointers from x to y *)
|
||||
primrec heap_path :: "('a \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> 'a option \<Rightarrow> bool" where
|
||||
"heap_path hp x [] y = (x = y)"
|
||||
| "heap_path hp x (a#as) y = (x = Some a \<and> heap_path hp (hp a) as y)"
|
||||
|
||||
(* When a path ends in None, it is a singly-linked list *)
|
||||
abbreviation heap_ls :: "('a \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> bool" where
|
||||
"heap_ls hp x xs \<equiv> heap_path hp x xs None"
|
||||
|
||||
(* Walk a linked list of next pointers, recording which it visited.
|
||||
Terminates artificially at loops, and otherwise because the address domain is finite *)
|
||||
function heap_walk :: "('a::finite \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> 'a list" where
|
||||
"heap_walk hp None xs = xs"
|
||||
| "heap_walk hp (Some x) xs = (if x \<in> set xs then xs else heap_walk hp (hp x) (xs@[x]))"
|
||||
by pat_completeness auto
|
||||
|
||||
lemma card_set_UNIV:
|
||||
fixes xs :: "'a::finite list"
|
||||
assumes "x \<notin> set xs"
|
||||
shows "card (set xs) < card(UNIV::'a set)"
|
||||
proof -
|
||||
have "finite (UNIV::'a set)" by simp
|
||||
moreover
|
||||
from assms have "set xs \<subset> UNIV" by blast
|
||||
ultimately
|
||||
show ?thesis by (rule psubset_card_mono)
|
||||
qed
|
||||
|
||||
termination heap_walk
|
||||
by (relation "measure (\<lambda>(_, _, xs). card(UNIV :: 'a set) - card (set xs))";
|
||||
simp add: card_set_UNIV diff_less_mono2)
|
||||
|
||||
lemma heap_path_append[simp]:
|
||||
"heap_path hp start (xs @ ys) end = (\<exists>x. heap_path hp start xs x \<and> heap_path hp x ys end)"
|
||||
by (induct xs arbitrary: start; simp)
|
||||
|
||||
lemma heap_path_None[simp]:
|
||||
"heap_path hp None xs end = (xs = [] \<and> end = None)"
|
||||
by (cases xs, auto)
|
||||
|
||||
lemma heap_ls_unique:
|
||||
"\<lbrakk> heap_ls hp x xs; heap_ls hp x ys \<rbrakk> \<Longrightarrow> xs = ys"
|
||||
by (induct xs arbitrary: ys x; simp) (case_tac ys; clarsimp)
|
||||
|
||||
lemma heap_ls_hd_not_in_tl:
|
||||
"heap_ls hp (hp x) xs \<Longrightarrow> x \<notin> set xs"
|
||||
proof
|
||||
assume "x \<in> set xs"
|
||||
then obtain ys zs where xs: "xs = ys @ x # zs" by (auto simp: in_set_conv_decomp)
|
||||
moreover assume "heap_ls hp (hp x) xs"
|
||||
moreover from this xs have "heap_ls hp (hp x) zs" by clarsimp
|
||||
ultimately show False by (fastforce dest: heap_ls_unique)
|
||||
qed
|
||||
|
||||
lemma heap_ls_distinct:
|
||||
"heap_ls hp x xs \<Longrightarrow> distinct xs"
|
||||
by (induct xs arbitrary: x; clarsimp simp: heap_ls_hd_not_in_tl)
|
||||
|
||||
lemma heap_ls_is_walk':
|
||||
"\<lbrakk> heap_ls hp x xs; set xs \<inter> set ys = {} \<rbrakk> \<Longrightarrow> heap_walk hp x ys = ys @ xs"
|
||||
by (frule heap_ls_distinct) (induct xs arbitrary: x ys; clarsimp)
|
||||
|
||||
lemma heap_ls_is_walk:
|
||||
"heap_ls hp x xs \<Longrightarrow> heap_walk hp x [] = xs"
|
||||
using heap_ls_is_walk' by fastforce
|
||||
|
||||
lemma heap_path_end_unique:
|
||||
"heap_path hp x xs y \<Longrightarrow> heap_path hp x xs y' \<Longrightarrow> y = y'"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma heap_path_head:
|
||||
"heap_path hp x xs y \<Longrightarrow> xs \<noteq> [] \<Longrightarrow> x = Some (hd xs)"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma heap_path_non_nil_lookup_next:
|
||||
"heap_path hp x (xs@z#ys) y \<Longrightarrow> hp z = (case ys of [] \<Rightarrow> y | _ \<Rightarrow> Some (hd ys))"
|
||||
by (cases ys; fastforce)
|
||||
|
||||
lemma heap_path_prefix:
|
||||
"heap_path hp st ls ed \<Longrightarrow> \<forall>xs\<le>ls. heap_path hp st xs (if xs = [] then st else hp (last xs))"
|
||||
apply clarsimp
|
||||
apply (erule Prefix_Order.prefixE)
|
||||
by (metis append_butlast_last_id heap_path_append heap_path_non_nil_lookup_next list.case(1))
|
||||
|
||||
lemma heap_path_butlast:
|
||||
"heap_path hp st ls ed \<Longrightarrow> ls \<noteq> [] \<Longrightarrow> heap_path hp st (butlast ls) (Some (last ls))"
|
||||
by (induct ls rule: rev_induct; simp)
|
||||
|
||||
lemma in_list_decompose_takeWhile:
|
||||
"x \<in> set xs \<Longrightarrow>
|
||||
xs = (takeWhile ((\<noteq>) x) xs) @ x # (drop (length (takeWhile ((\<noteq>) x) xs) + 1) xs)"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma takeWhile_neq_hd_eq_Nil[simp]:
|
||||
"takeWhile ((\<noteq>) (hd xs)) xs = Nil"
|
||||
by (metis (full_types) hd_Cons_tl takeWhile.simps(1) takeWhile.simps(2))
|
||||
|
||||
lemma heap_not_in_dom[simp]:
|
||||
"ptr \<notin> dom hp \<Longrightarrow> hp(ptr := None) = hp"
|
||||
by (auto simp: dom_def)
|
||||
|
||||
lemma heap_path_takeWhile_lookup_next:
|
||||
"\<lbrakk> heap_path hp st rs ed; r \<in> set rs \<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) r) rs) (Some r)"
|
||||
apply (drule heap_path_prefix)
|
||||
apply (subgoal_tac "takeWhile ((\<noteq>) r) rs @ [r] \<le> rs", fastforce)
|
||||
by (fastforce dest!: in_list_decompose_takeWhile intro: Prefix_Order.prefixI)
|
||||
|
||||
lemma heap_path_heap_upd_not_in:
|
||||
"\<lbrakk>heap_path hp st rs ed; r \<notin> set rs\<rbrakk> \<Longrightarrow> heap_path (hp(r:= x)) st rs ed"
|
||||
by (induct rs arbitrary: st; clarsimp)
|
||||
|
||||
lemma heap_path_last_update:
|
||||
"\<lbrakk>heap_path hp st xs end; xs \<noteq> []; distinct xs\<rbrakk> \<Longrightarrow> heap_path (hp(last xs := new)) st xs new"
|
||||
by (induct xs arbitrary: st rule: rev_induct; simp add: heap_path_heap_upd_not_in)
|
||||
|
||||
lemma heap_walk_lb:
|
||||
"heap_walk hp x xs \<ge> xs"
|
||||
apply (induct xs rule: heap_walk.induct; clarsimp)
|
||||
by (metis Prefix_Order.prefixE Prefix_Order.prefixI append_assoc)
|
||||
|
||||
lemma heal_walk_Some_nonempty':
|
||||
"heap_walk hp (Some x) [] > []"
|
||||
by (fastforce intro: heap_walk_lb less_le_trans[where y="[x]"])
|
||||
|
||||
lemma heal_walk_Some_nonempty:
|
||||
"heap_walk hp (Some x) [] \<noteq> []"
|
||||
by (metis less_list_def heal_walk_Some_nonempty')
|
||||
|
||||
lemma heap_walk_Nil_None:
|
||||
"heap_walk hp st [] = [] \<Longrightarrow> st = None"
|
||||
by (case_tac st; simp only: heal_walk_Some_nonempty)
|
||||
|
||||
lemma heap_path_last_end:
|
||||
"heap_path hp st xs end \<Longrightarrow> xs \<noteq> [] \<Longrightarrow> hp (last xs) = end"
|
||||
by (induct xs rule: rev_induct; clarsimp)
|
||||
|
||||
lemmas heap_ls_last_None = heap_path_last_end[where ?end=None]
|
||||
|
||||
(* sym_heap *)
|
||||
|
||||
definition sym_heap where
|
||||
"sym_heap hp hp' \<equiv> \<forall>p p'. hp p = Some p' \<longleftrightarrow> hp' p' = Some p"
|
||||
|
||||
lemma sym_heapD1:
|
||||
"sym_heap hp hp' \<Longrightarrow> hp p = Some p' \<Longrightarrow> hp' p' = Some p"
|
||||
by (clarsimp simp: sym_heap_def)
|
||||
|
||||
lemma sym_heapD2:
|
||||
"sym_heap hp hp' \<Longrightarrow> hp' p' = Some p \<Longrightarrow> hp p = Some p'"
|
||||
by (clarsimp simp: sym_heap_def)
|
||||
|
||||
lemma sym_heap_symmetric:
|
||||
"sym_heap hp hp' \<longleftrightarrow> sym_heap hp' hp"
|
||||
unfolding sym_heap_def by blast
|
||||
|
||||
lemma sym_heap_None:
|
||||
"\<lbrakk>sym_heap hp hp'; hp p = None\<rbrakk> \<Longrightarrow> \<forall>p'. hp' p' \<noteq> Some p" unfolding sym_heap_def by force
|
||||
|
||||
lemma sym_heap_remove_only:
|
||||
"\<lbrakk>sym_heap hp hp'; hp' y = Some x\<rbrakk> \<Longrightarrow> sym_heap (hp(x := None)) (hp'(y := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.inject)
|
||||
done
|
||||
|
||||
lemma sym_heap_remove_only':
|
||||
"\<lbrakk>sym_heap hp hp'; hp y = Some x\<rbrakk> \<Longrightarrow> sym_heap (hp(y := None)) (hp'(x := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.inject)
|
||||
done
|
||||
|
||||
lemma sym_heap_remove_middle_from_chain:
|
||||
"\<lbrakk>sym_heap hp hp'; before \<noteq> middle; middle \<noteq> after;
|
||||
hp before = Some middle; hp middle = Some after\<rbrakk>
|
||||
\<Longrightarrow> sym_heap (hp(before := Some after, middle := None))
|
||||
(hp'(after := Some before, middle := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.simps(1))
|
||||
done
|
||||
|
||||
lemma sym_heap_connect:
|
||||
"\<lbrakk>sym_heap hp hp'; hp a = None; hp' b = None \<rbrakk> \<Longrightarrow> sym_heap (hp(a \<mapsto> b)) (hp'(b \<mapsto> a))"
|
||||
by (force simp: sym_heap_def)
|
||||
|
||||
lemma sym_heap_insert_into_middle_of_chain:
|
||||
"\<lbrakk>sym_heap hp hp'; hp before = Some after; hp middle = None; hp' middle = None\<rbrakk>
|
||||
\<Longrightarrow> sym_heap (hp(before \<mapsto> middle, middle \<mapsto> after)) (hp'(after \<mapsto> middle, middle \<mapsto> before))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.simps)
|
||||
done
|
||||
|
||||
lemma sym_heap_path_reverse:
|
||||
"sym_heap hp hp' \<Longrightarrow>
|
||||
heap_path hp (Some p) (p#ps) (Some p')
|
||||
\<longleftrightarrow> heap_path hp' (Some p') (p'#(rev ps)) (Some p)"
|
||||
unfolding sym_heap_def by (induct ps arbitrary: p p' rule: rev_induct; force)
|
||||
|
||||
lemma sym_heap_ls_rev_Cons:
|
||||
"\<lbrakk>sym_heap hp hp'; heap_ls hp (Some p) (p#ps)\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp' (Some (last (p#ps))) (rev ps) (Some p)"
|
||||
supply rev.simps[simp del]
|
||||
apply (induct ps arbitrary: p rule: rev_induct; simp add: rev.simps)
|
||||
by (auto dest!: sym_heap_path_reverse[THEN iffD1])
|
||||
|
||||
lemma sym_heap_ls_rev:
|
||||
"\<lbrakk>sym_heap hp hp'; heap_ls hp (Some p) ps\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp' (Some (last ps)) (butlast (rev ps)) (Some p)
|
||||
\<and> hp (last ps) = None"
|
||||
apply (induct ps arbitrary: p rule: rev_induct, simp)
|
||||
apply (frule heap_path_head; clarsimp)
|
||||
by (auto dest!: sym_heap_path_reverse[THEN iffD1])
|
||||
|
||||
lemma heap_path_sym_heap_non_nil_lookup_prev:
|
||||
"\<lbrakk>heap_ls hp x (xs @ z # ys); sym_heap hp hp'; xs \<noteq> []\<rbrakk> \<Longrightarrow> hp' z = (Some (last xs))"
|
||||
supply heap_path_append[simp del]
|
||||
apply (cut_tac xs="butlast xs" and z="last xs" and ys="z # ys"
|
||||
in heap_path_non_nil_lookup_next[where hp=hp and x=x and y=None])
|
||||
apply (frule append_butlast_last_id)
|
||||
apply (metis append_eq_Cons_conv append_eq_append_conv2)
|
||||
apply (fastforce dest: sym_heapD1)
|
||||
done
|
||||
|
||||
lemma heap_ls_no_loops:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs\<rbrakk> \<Longrightarrow> hp p \<noteq> Some p"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (fastforce dest: split_list heap_path_non_nil_lookup_next split: list.splits)
|
||||
done
|
||||
|
||||
lemma heap_ls_prev_no_loops:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; sym_heap hp hp'\<rbrakk> \<Longrightarrow> hp' p \<noteq> Some p"
|
||||
by (fastforce dest: heap_ls_no_loops sym_heapD2)
|
||||
|
||||
(* more on heap_path : next/prev in path *)
|
||||
|
||||
lemma heap_path_extend:
|
||||
"heap_path hp st (ls @ [p]) (hp p) \<longleftrightarrow> heap_path hp st ls (Some p)"
|
||||
by (induct ls rule: rev_induct; simp)
|
||||
|
||||
lemma heap_path_prefix_heap_ls:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st ys ed\<rbrakk> \<Longrightarrow> ys \<le> xs"
|
||||
apply (induct xs arbitrary: ys st, simp)
|
||||
apply (case_tac ys; clarsimp)
|
||||
done
|
||||
|
||||
lemma distinct_decompose2:
|
||||
"\<lbrakk>distinct xs; xs = ys @ x # y # zs\<rbrakk>
|
||||
\<Longrightarrow> x \<noteq> y \<and> x \<notin> set ys \<and> y \<notin> set ys \<and> x \<notin> set zs \<and> y \<notin> set zs"
|
||||
by (simp add: in_set_conv_decomp)
|
||||
|
||||
lemma heap_path_distinct_next_cases: (* the other direction needs sym_heap *)
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> ed = Some p \<or> ed = Some np \<or> np \<in> set xs"
|
||||
apply (cases ed; simp)
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_ls hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs)")
|
||||
apply (drule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs"; simp)
|
||||
apply (metis in_set_dropD list.set_intros(1))
|
||||
apply simp
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_path hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs) ed")
|
||||
apply (frule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs", simp)
|
||||
apply (simp split: if_split_asm)
|
||||
apply (drule (1) distinct_decompose2)
|
||||
apply clarsimp
|
||||
by (metis in_set_dropD list.set_intros(1)) simp
|
||||
|
||||
lemma heap_ls_next_in_list:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<in> set xs"
|
||||
apply (subgoal_tac "distinct xs")
|
||||
by (fastforce dest!: heap_path_distinct_next_cases) (erule heap_ls_distinct)
|
||||
|
||||
lemma heap_path_distinct_sym_prev_cases:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; np \<in> set xs; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> st = Some np \<or> p \<in> set xs"
|
||||
apply (cases st; simp)
|
||||
apply (rename_tac stp)
|
||||
apply (case_tac "stp = np"; simp)
|
||||
apply (cases xs; simp del: heap_path.simps)
|
||||
apply (frule heap_path_head, simp)
|
||||
apply (cases ed, clarsimp)
|
||||
apply (frule sym_heap_ls_rev_Cons, fastforce)
|
||||
apply (drule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def)
|
||||
apply simp
|
||||
apply (simp del: heap_path.simps)
|
||||
apply (frule (1) sym_heap_path_reverse[where hp'=hp', THEN iffD1])
|
||||
apply simp
|
||||
apply (frule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma heap_ls_prev_cases:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<in> set xs; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> st = Some np \<or> p \<in> set xs"
|
||||
apply (subgoal_tac "distinct xs")
|
||||
by (fastforce dest!: heap_path_distinct_sym_prev_cases) (erule heap_ls_distinct)
|
||||
|
||||
lemma heap_ls_prev_not_in:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<notin> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> p \<notin> set xs"
|
||||
by (meson heap_ls_next_in_list)
|
||||
|
||||
lemma heap_path_distinct_prev_not_in:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; np \<notin> set xs; hp p = Some np; ed \<noteq> Some np; ed \<noteq> Some p\<rbrakk>
|
||||
\<Longrightarrow> p \<notin> set xs"
|
||||
using heap_path_distinct_next_cases
|
||||
by fastforce
|
||||
|
||||
lemma heap_path_distinct_next_not_in:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; p \<notin> set xs; hp p = Some np;
|
||||
sym_heap hp hp'; st \<noteq> Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<notin> set xs"
|
||||
by (fastforce dest!: heap_path_distinct_sym_prev_cases[simplified])
|
||||
|
||||
lemma heap_ls_next_not_in:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<notin> set xs; hp p = Some np; sym_heap hp hp'; st \<noteq> Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<notin> set xs"
|
||||
by (fastforce dest!: heap_ls_prev_cases[simplified])
|
||||
|
||||
lemma sym_heap_prev_None_is_start:
|
||||
"\<lbrakk>heap_ls hp st xs; sym_heap hp hp'; p \<in> set xs; hp' p = None\<rbrakk>
|
||||
\<Longrightarrow> Some p = st"
|
||||
using split_list_last heap_path_sym_heap_non_nil_lookup_prev
|
||||
by fastforce
|
||||
|
||||
lemma not_last_next_not_None:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; p \<noteq> last xs\<rbrakk> \<Longrightarrow> hp p \<noteq> None"
|
||||
by (fastforce intro: heap_path_head dest: split_list)
|
||||
|
||||
lemma not_head_prev_not_None:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; p \<noteq> hd xs; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> hp' p \<noteq> None"
|
||||
using sym_heap_prev_None_is_start heap_path_head
|
||||
by fastforce
|
||||
|
||||
(* more on heap_path *)
|
||||
|
||||
lemma heap_ls_next_takeWhile_append:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> takeWhile ((\<noteq>) np) xs = (takeWhile ((\<noteq>) p) xs) @ [p]"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_ls hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs)")
|
||||
prefer 2 apply simp
|
||||
apply (drule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs"; simp)
|
||||
apply (subgoal_tac "np \<in> set xs")
|
||||
prefer 2 apply (erule (2) heap_ls_next_in_list)
|
||||
apply (frule in_list_decompose_takeWhile[where x=np])
|
||||
apply (drule (1) distinct_inj_middle[where x=np and xa="takeWhile ((\<noteq>) np) xs" and ya="takeWhile ((\<noteq>) p) xs @ [p]"])
|
||||
apply simp+
|
||||
done
|
||||
|
||||
(* RT FIXME: Move *)
|
||||
lemma takeWhile_neq_notin_same:
|
||||
"x \<notin> set xs \<Longrightarrow> takeWhile ((\<noteq>) x) xs = xs"
|
||||
using takeWhile_eq_all_conv by blast
|
||||
|
||||
lemma heap_path_extend_takeWhile:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st (takeWhile ((\<noteq>) p) xs) (Some p); hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) np) xs) (Some np)"
|
||||
apply (case_tac "p \<in> set xs")
|
||||
apply (subst heap_ls_next_takeWhile_append[where p=p and np=np and hp=hp]; simp)
|
||||
apply (drule takeWhile_neq_notin_same, simp)
|
||||
apply (drule (1) heap_path_end_unique, simp)
|
||||
done
|
||||
|
||||
lemma heap_ls_next_takeWhile_append_sym:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<in> set xs; st \<noteq> Some np; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow>takeWhile ((\<noteq>) np) xs = (takeWhile ((\<noteq>) p) xs) @ [p]"
|
||||
apply (frule (3) heap_ls_prev_cases, simp)
|
||||
apply (fastforce elim!: heap_ls_next_takeWhile_append)
|
||||
done
|
||||
|
||||
lemma heap_path_curtail_takeWhile:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st (takeWhile ((\<noteq>) np) xs) (Some np);
|
||||
st \<noteq> Some np; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) p) xs) (Some p)"
|
||||
apply (case_tac "np \<in> set xs")
|
||||
apply (drule (4) heap_ls_next_takeWhile_append_sym)
|
||||
apply simp
|
||||
apply (drule takeWhile_neq_notin_same, simp)
|
||||
apply (drule (1) heap_path_end_unique, simp)
|
||||
done
|
||||
|
||||
(* more on heap_path : end *)
|
||||
|
||||
|
||||
\<comment> \<open>Lemmas relating an update to the list to an update to the heap\<close>
|
||||
|
||||
lemma heap_ls_prepend:
|
||||
"\<lbrakk>heap_ls hp st xs; new \<notin> set xs; xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(new := Some (hd xs))) (Some new) (new # xs)"
|
||||
apply simp
|
||||
apply (erule heap_path_heap_upd_not_in[rotated])
|
||||
apply (frule (1) heap_path_head)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma heap_ls_append:
|
||||
"\<lbrakk>heap_ls hp st xs; xs \<noteq> []; new \<notin> set xs\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some new, new := None)) st (xs @ [new])"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply simp
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
apply (fastforce simp: heap_path_last_update)
|
||||
apply assumption
|
||||
done
|
||||
|
||||
lemma heap_ls_list_insert_before:
|
||||
"\<lbrakk>heap_ls hp st (xs @ ys); new \<notin> set (xs @ ys); xs \<noteq> []; ys \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some new, new := Some (hd ys))) st
|
||||
(list_insert_before (xs @ ys) (hd ys) new)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (subst list_insert_before_distinct; fastforce?)
|
||||
apply simp
|
||||
apply (rule conjI)
|
||||
\<comment> \<open>the path until new\<close>
|
||||
apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update)
|
||||
\<comment> \<open>the path from hd ys\<close>
|
||||
apply (metis disjoint_iff_not_equal heap_path_head heap_path_heap_upd_not_in last_in_set)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_singleton:
|
||||
"heap_ls hp st [x] \<Longrightarrow> heap_ls (hp(x := None)) None []"
|
||||
by simp
|
||||
|
||||
lemma heap_ls_remove_head_not_singleton:
|
||||
"\<lbrakk>heap_ls hp st xs; tl xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(hd xs := None)) (Some (hd (tl xs))) (tl xs)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (cases xs; simp)
|
||||
apply clarsimp
|
||||
apply (frule heap_path_head)
|
||||
apply fastforce
|
||||
apply (fastforce elim!: heap_path_heap_upd_not_in)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_last_not_singleton:
|
||||
"\<lbrakk>heap_ls hp st xs; butlast xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp((last (butlast xs)) := None)) st (butlast xs)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (frule distinct_butlast)
|
||||
apply (fastforce dest: heap_path_last_update heap_path_butlast)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_middle:
|
||||
"\<lbrakk>heap_ls hp st (xs @ a # ys); xs \<noteq> []; ys \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some (hd ys), a := None)) st (xs @ ys)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply simp
|
||||
apply (rule_tac x="Some (hd ys)" in exI)
|
||||
apply (rule conjI)
|
||||
apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update)
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
using heap_path_head apply fastforce
|
||||
apply force
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
end
|
|
@ -2558,6 +2558,10 @@ lemma ranD:
|
|||
"v \<in> ran f \<Longrightarrow> \<exists>x. f x = Some v"
|
||||
by (auto simp: ran_def)
|
||||
|
||||
lemma fun_upd_swap:
|
||||
"a \<noteq> c \<Longrightarrow> hp(c := d, a := b) = hp(a := b, c := d)"
|
||||
by fastforce
|
||||
|
||||
text \<open>Prevent clarsimp and others from creating Some from not None by folding this and unfolding
|
||||
again when safe.\<close>
|
||||
|
||||
|
|
|
@ -371,6 +371,32 @@ lemma list_insert_after_after:
|
|||
apply fastforce
|
||||
done
|
||||
|
||||
lemma list_insert_before_not_found:
|
||||
"a \<notin> set ls \<Longrightarrow> list_insert_before ls a new = ls"
|
||||
by (induct ls; fastforce)
|
||||
|
||||
lemma list_insert_before_nonempty:
|
||||
"ls \<noteq> [] \<Longrightarrow> list_insert_before ls a new \<noteq> []"
|
||||
by (induct ls; clarsimp)
|
||||
|
||||
lemma list_insert_before_head:
|
||||
"xs \<noteq> [] \<Longrightarrow> list_insert_before xs (hd xs) new = new # xs"
|
||||
by (induct xs; fastforce)
|
||||
|
||||
lemma last_of_list_insert_before:
|
||||
"xs \<noteq> [] \<Longrightarrow> last (list_insert_before xs a new) = last xs"
|
||||
by (induct xs; clarsimp simp: list_insert_before_nonempty)
|
||||
|
||||
lemma list_insert_before_distinct:
|
||||
"\<lbrakk>distinct (xs @ ys); ys \<noteq> []\<rbrakk> \<Longrightarrow> list_insert_before (xs @ ys) (hd ys) new = xs @ new # ys"
|
||||
by (induct xs; fastforce simp add: list_insert_before_head)
|
||||
|
||||
lemma set_list_insert_before:
|
||||
"\<lbrakk>new \<notin> set ls; before \<in> set ls\<rbrakk> \<Longrightarrow> set (list_insert_before ls before new) = set ls \<union> {new}"
|
||||
apply (induct ls; clarsimp)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma list_remove_removed:
|
||||
"set (list_remove list x) = (set list) - {x}"
|
||||
apply (induct list,simp+)
|
||||
|
|
|
@ -26,6 +26,12 @@ primrec list_insert_after :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Righta
|
|||
"list_insert_after (x # xs) a b = (if x = a then x # b # xs
|
||||
else x # list_insert_after xs a b)"
|
||||
|
||||
\<comment> \<open>Inserts the value new immediately before the first occurence of a (if any) in the list\<close>
|
||||
primrec list_insert_before :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a list" where
|
||||
"list_insert_before [] a new = []" |
|
||||
"list_insert_before (x # xs) a new = (if x = a then new # x # xs
|
||||
else x # list_insert_before xs a new)"
|
||||
|
||||
primrec list_remove :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
|
||||
"list_remove [] a = []" |
|
||||
"list_remove (x # xs) a = (if x = a then (list_remove xs a)
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
theory MonadicRewrite
|
||||
imports
|
||||
Monads.Nondet_VCG
|
||||
Corres_UL
|
||||
ExtraCorres
|
||||
Monads.Nondet_Empty_Fail
|
||||
Rules_Tac
|
||||
begin
|
||||
|
@ -81,6 +81,10 @@ lemma monadic_rewrite_pre_imp_eq:
|
|||
"\<lbrakk> \<And>s. P s \<Longrightarrow> f s = g s \<rbrakk> \<Longrightarrow> monadic_rewrite F E P f g"
|
||||
by (simp add: monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_guard_arg_cong:
|
||||
"(\<And>s. P s \<Longrightarrow> x = y) \<Longrightarrow> monadic_rewrite F E P (f x) (f y)"
|
||||
by (clarsimp simp: monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_exists:
|
||||
"(\<And>v. monadic_rewrite F E (Q v) m m')
|
||||
\<Longrightarrow> monadic_rewrite F E ((\<lambda>s. \<forall>v. P v s \<longrightarrow> Q v s) and (\<lambda>s. \<exists>v. P v s)) m m'"
|
||||
|
@ -693,7 +697,7 @@ lemma monadic_rewrite_refine_validE_R:
|
|||
by (simp add: validE_R_def validE_def monadic_rewrite_refine_valid)
|
||||
|
||||
lemma monadic_rewrite_is_valid:
|
||||
"\<lbrakk> monadic_rewrite False False P' f f'; \<lbrace>P\<rbrace> do x <- f; g x od \<lbrace>Q\<rbrace> \<rbrakk>
|
||||
"\<lbrakk> monadic_rewrite False E P' f f'; \<lbrace>P\<rbrace> do x <- f; g x od \<lbrace>Q\<rbrace> \<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>P and P'\<rbrace> do x <- f'; g x od \<lbrace>Q\<rbrace>"
|
||||
by (fastforce simp: monadic_rewrite_def valid_def bind_def)
|
||||
|
||||
|
@ -740,6 +744,13 @@ lemma monadic_rewrite_corres_r_generic:
|
|||
\<Longrightarrow> corres_underlying R nf nf' r P (P' and Q) a c"
|
||||
by (fastforce simp: corres_underlying_def monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_corres_r_generic_ex_abs:
|
||||
"\<lbrakk> monadic_rewrite F E (\<lambda>s'. Q s' \<and> ex_abs_underlying sr P s') c' c;
|
||||
corres_underlying sr nf nf'' r P P' a c';
|
||||
F \<longrightarrow> nf''; nf' \<longrightarrow> nf'' \<rbrakk>
|
||||
\<Longrightarrow> corres_underlying sr nf nf' r P (P' and Q) a c"
|
||||
by (fastforce simp: corres_underlying_def monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_corres_r:
|
||||
"\<lbrakk> monadic_rewrite False True Q c c';
|
||||
corres_underlying R nf nf' r P P' a c' \<rbrakk>
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
(* Syntax for using multi-argument functions as predicates, e.g "P and Q" where P and Q are
|
||||
functions to bool, taking one or more parameters. *)
|
||||
|
||||
chapter \<open>Function Predicate Syntax\<close>
|
||||
|
||||
theory Fun_Pred_Syntax
|
||||
imports Main
|
||||
begin
|
||||
|
@ -137,7 +139,9 @@ lemma pred_bot_comp[simp]:
|
|||
by (simp add: comp_def)
|
||||
|
||||
|
||||
text \<open>We would get these for free if we could instantiate pred_top/pred_bot to top/bot directly:\<close>
|
||||
text \<open>
|
||||
We would get these for free if we could instantiate @{const pred_top}/@{const pred_bot} to
|
||||
@{const top}/@{const bot} directly:\<close>
|
||||
|
||||
lemmas pred_top_left_neutral[simp] =
|
||||
inf_top.left_neutral[where 'a="'a \<Rightarrow> bool", unfolded pred_top_def]
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
chapter Lib
|
||||
|
||||
session Monads (lib) = HOL +
|
||||
options [document = pdf]
|
||||
|
||||
sessions
|
||||
"HOL-Library"
|
||||
|
@ -21,14 +22,7 @@ session Monads (lib) = HOL +
|
|||
trace
|
||||
|
||||
theories
|
||||
WPBang
|
||||
WPFix
|
||||
Eisbach_WP
|
||||
WPI
|
||||
WPC
|
||||
WP_Pre
|
||||
WP
|
||||
Datatype_Schematic
|
||||
Fun_Pred_Syntax
|
||||
Nondet_Monad
|
||||
Nondet_Lemmas
|
||||
Nondet_VCG
|
||||
|
@ -49,6 +43,28 @@ session Monads (lib) = HOL +
|
|||
Trace_Lemmas
|
||||
Trace_VCG
|
||||
Trace_Det
|
||||
Trace_No_Throw
|
||||
Trace_Empty_Fail
|
||||
Trace_No_Trace
|
||||
Trace_Total
|
||||
Trace_Strengthen_Setup
|
||||
Trace_Monad_Equations
|
||||
Trace_RG
|
||||
Trace_In_Monad
|
||||
Trace_More_VCG
|
||||
Trace_No_Fail
|
||||
Trace_Sat
|
||||
Strengthen
|
||||
Nondet_Strengthen_Setup
|
||||
Strengthen_Demo
|
||||
WPBang
|
||||
WPFix
|
||||
Eisbach_WP
|
||||
WPI
|
||||
WPC
|
||||
WP_Pre
|
||||
WP
|
||||
Datatype_Schematic
|
||||
|
||||
document_files
|
||||
"root.tex"
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
section \<open>Manipulating Hoare Logic Assertions\<close>
|
||||
|
||||
theory Strengthen
|
||||
imports Main
|
||||
begin
|
||||
|
|
|
@ -73,11 +73,11 @@ thm subset_UNIV subset_UNIV[mk_strg]
|
|||
|
||||
text \<open>Rules which would introduce schematics are
|
||||
adjusted by @{attribute mk_strg} to introduce quantifiers
|
||||
instead. The argument I to mk_strg prevents this step.
|
||||
instead. The argument I to @{attribute mk_strg} prevents this step.
|
||||
\<close>
|
||||
thm subsetD subsetD[mk_strg I] subsetD[mk_strg]
|
||||
|
||||
text \<open>The first argument to mk_strg controls the way
|
||||
text \<open>The first argument to @{attribute mk_strg} controls the way
|
||||
the rule will be applied.
|
||||
I: use rule in introduction style, matching its conclusion.
|
||||
D: use rule in destruction (forward) style, matching its first premise.
|
||||
|
@ -101,10 +101,10 @@ lemma
|
|||
(* oops, overdid it *)
|
||||
oops
|
||||
|
||||
text \<open>Subsequent arguments to mk_strg capture premises for
|
||||
text \<open>Subsequent arguments to @{attribute mk_strg} capture premises for
|
||||
special treatment. The 'A' argument (synonym 'E') specifies that
|
||||
a premise should be solved by assumption. Our fancy proof above
|
||||
used a strengthen rule bexI[mk_strg I _ A], which tells strengthen
|
||||
used a strengthen rule @{text "bexI[mk_strg I _ A]"}, which tells strengthen
|
||||
to do approximately the same thing as
|
||||
\<open>apply (rule bexI) prefer 2 apply assumption\<close>
|
||||
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
%
|
||||
% Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
%
|
||||
% SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
%
|
||||
|
||||
\documentclass[11pt,a4paper]{report}
|
||||
\usepackage[T1]{fontenc}
|
||||
\usepackage{isabelle,isabellesym}
|
||||
|
||||
% this should be the last package used
|
||||
\usepackage{pdfsetup}
|
||||
|
||||
% urls in roman style, theory text in math-similar italics
|
||||
\urlstyle{rm}
|
||||
\isabellestyle{tt}
|
||||
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{State Monad Library}
|
||||
\author{David Greenaway,
|
||||
Gerwin Klein,
|
||||
Corey Lewis,
|
||||
Daniel Matichuk,
|
||||
Thomas Sewell}
|
||||
\maketitle
|
||||
|
||||
\begin{abstract}
|
||||
This entry contains a library of different state monads with a large set of
|
||||
operators, laws, Hoare Logic, weakest precondition rules, and corresponding
|
||||
automation. The formalisation is designed for reasoning about total and
|
||||
partial correctness, as well as for reasoning about failure separately from
|
||||
normal behaviour. Partial correctness in this context ignores program failure.
|
||||
Total correctness implies the absence of program failure.
|
||||
|
||||
The main monads formalised in this entry are a non-deterministic state monad
|
||||
with failure, and a state-based trace monad for modelling concurrent executions.
|
||||
Both support roughly the same set of operators. They come with a standard
|
||||
Hoare Logic and Rely-Guarantee logic respectively. The entry also contains an
|
||||
option reader monad that combines well with the non-deterministic state monad.
|
||||
The option reader monad provides additional operators useful for building
|
||||
state projections that can be used both in monadic functions and Hoare-Logic
|
||||
assertions, enhancing specification re-use in proofs.
|
||||
|
||||
This monad library is used in the verification of the seL4 microkernel and
|
||||
predates some of the monad developments in the Isabelle library. In
|
||||
particular, it defines its own syntax for do-notation, which can be overridden
|
||||
with the generic monad syntax in the Isabelle library. We have opted not to do
|
||||
so by default, because the overloading-based syntax from the Isabelle library
|
||||
often necessitates additional type annotations when mixing different monad
|
||||
types within one specification. For similar reasons, no attempt is made to
|
||||
state generic state monad laws in a type class or locale and then instantiate
|
||||
them for the two main monad instances. The resulting duplication from two
|
||||
instances is still easy to handle, but if more instances are added to this
|
||||
library, additional work on genericity would be useful.
|
||||
|
||||
This library has grown over more than a decade with many substantial
|
||||
contributions. We would specifically like to acknowledge the contributions by
|
||||
Nelson Billing, Andrew Boyton, Matthew Brecknell, David Cock, Matthias Daum,
|
||||
Alejandro Gomez-Londono, Rafal Kolanski, Japheth Lim, Michael McInerney, Toby
|
||||
Murray, Lars Noschinski, Edward Pierzchalski, Sean Seefried, Miki Tanaka, Vernon
|
||||
Tang, and Simon Windwood.
|
||||
\end{abstract}
|
||||
|
||||
\tableofcontents
|
||||
|
||||
\parindent 0pt\parskip 0.5ex
|
||||
|
||||
% generated text of all theories
|
||||
\input{session}
|
||||
|
||||
\bibliographystyle{abbrv}
|
||||
\bibliography{root}
|
||||
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% End:
|
|
@ -14,7 +14,7 @@ begin
|
|||
section \<open>Monads that are wellformed w.r.t. failure\<close>
|
||||
|
||||
text \<open>
|
||||
Usually, well-formed monads constructed from the primitives in Nondet_Monad will have the following
|
||||
Usually, well-formed monads constructed from the primitives in @{text Nondet_Monad} will have the following
|
||||
property: if they return an empty set of results, they will have the failure flag set.\<close>
|
||||
definition empty_fail :: "('s,'a) nondet_monad \<Rightarrow> bool" where
|
||||
"empty_fail m \<equiv> \<forall>s. fst (m s) = {} \<longrightarrow> snd (m s)"
|
||||
|
|
|
@ -130,7 +130,7 @@ lemma no_fail_returnOK[simp, wp]:
|
|||
by (simp add: returnOk_def)
|
||||
|
||||
lemma no_fail_bind[wp]:
|
||||
"\<lbrakk> no_fail P f; \<And>rv. no_fail (R rv) (g rv); \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> no_fail (P and Q) (f >>= (\<lambda>rv. g rv))"
|
||||
"\<lbrakk> \<And>rv. no_fail (R rv) (g rv); \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>; no_fail P f \<rbrakk> \<Longrightarrow> no_fail (P and Q) (f >>= (\<lambda>rv. g rv))"
|
||||
unfolding no_fail_def bind_def
|
||||
using post_by_hoare by fastforce
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ lemma no_throw_def':
|
|||
by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits)
|
||||
|
||||
|
||||
subsection \<open>no_throw rules\<close>
|
||||
subsection \<open>@{text no_throw} rules\<close>
|
||||
|
||||
lemma no_throw_returnOk[simp]:
|
||||
"no_throw P (returnOk a)"
|
||||
|
|
|
@ -11,7 +11,7 @@ theory Nondet_Total
|
|||
imports Nondet_No_Fail
|
||||
begin
|
||||
|
||||
section \<open>Total correctness for Nondet_Monad and Nondet_Monad with exceptions\<close>
|
||||
section \<open>Total correctness for @{text Nondet_Monad} and @{text Nondet_Monad} with exceptions\<close>
|
||||
|
||||
subsection Definitions
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ text \<open>
|
|||
the empty set, the triple is trivially valid. This means @{term "assert P"}
|
||||
does not require us to prove that @{term P} holds, but rather allows us
|
||||
to assume @{term P}! Proving non-failure is done via a separate predicate and
|
||||
calculus (see Nondet_No_Fail).\<close>
|
||||
calculus (see theory @{text Nondet_No_Fail}).\<close>
|
||||
definition valid ::
|
||||
"('s \<Rightarrow> bool) \<Rightarrow> ('s,'a) nondet_monad \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> bool"
|
||||
("\<lbrace>_\<rbrace>/ _ /\<lbrace>_\<rbrace>") where
|
||||
|
|
|
@ -198,7 +198,7 @@ lemma whileLoop_results_induct_lemma3 [consumes 1]:
|
|||
apply clarsimp
|
||||
done
|
||||
|
||||
subsection "Inductive reasoning about whileLoop results"
|
||||
subsection "Inductive reasoning about @{const whileLoop} results"
|
||||
|
||||
lemma in_whileLoop_induct[consumes 1]:
|
||||
assumes in_whileLoop: "(r', s') \<in> fst (whileLoop C B r s)"
|
||||
|
@ -278,7 +278,7 @@ lemma whileLoop_terminatesE_induct [consumes 1]:
|
|||
apply (force simp: lift_def split: sum.splits)
|
||||
done
|
||||
|
||||
subsection "Direct reasoning about whileLoop components"
|
||||
subsection "Direct reasoning about @{const whileLoop} components"
|
||||
|
||||
lemma fst_whileLoop_cond_false:
|
||||
assumes loop_result: "(r', s') \<in> fst (whileLoop C B r s)"
|
||||
|
@ -447,33 +447,36 @@ lemma exs_valid_whileLoop:
|
|||
and wf_R: "wf R"
|
||||
and final_I: "\<And>r s. \<lbrakk> T r s; \<not> C r s \<rbrakk> \<Longrightarrow> Q r s"
|
||||
shows "\<lbrace> P \<rbrace> whileLoop C B r \<exists>\<lbrace> Q \<rbrace>"
|
||||
proof (clarsimp simp: exs_valid_def Bex_def)
|
||||
fix s
|
||||
assume "P s"
|
||||
|
||||
proof -
|
||||
{
|
||||
fix x
|
||||
have "T (fst x) (snd x) \<Longrightarrow> \<exists>r' s'. (r', s') \<in> fst (whileLoop C B (fst x) (snd x)) \<and> T r' s'"
|
||||
using wf_R
|
||||
proof induct
|
||||
case (less x)
|
||||
then show ?case
|
||||
apply atomize
|
||||
apply (cases "C (fst x) (snd x)")
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (clarsimp simp: condition_def bind_def')
|
||||
apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"])
|
||||
apply (clarsimp simp: exs_valid_def)
|
||||
apply blast
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (cases x)
|
||||
apply (clarsimp simp: condition_def bind_def' return_def)
|
||||
done
|
||||
qed
|
||||
}
|
||||
fix s
|
||||
assume "P s"
|
||||
|
||||
thus "\<exists>r' s'. (r', s') \<in> fst (whileLoop C B r s) \<and> Q r' s'"
|
||||
by (metis \<open>P s\<close> fst_conv init_T snd_conv final_I fst_whileLoop_cond_false)
|
||||
{
|
||||
fix x
|
||||
have "T (fst x) (snd x) \<Longrightarrow> \<exists>r' s'. (r', s') \<in> fst (whileLoop C B (fst x) (snd x)) \<and> T r' s'"
|
||||
using wf_R
|
||||
proof induct
|
||||
case (less x)
|
||||
then show ?case
|
||||
apply atomize
|
||||
apply (cases "C (fst x) (snd x)")
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (clarsimp simp: condition_def bind_def')
|
||||
apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"])
|
||||
apply (clarsimp simp: exs_valid_def)
|
||||
apply blast
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (cases x)
|
||||
apply (clarsimp simp: condition_def bind_def' return_def)
|
||||
done
|
||||
qed
|
||||
}
|
||||
|
||||
then have "\<exists>r' s'. (r', s') \<in> fst (whileLoop C B r s) \<and> Q r' s'"
|
||||
by (metis \<open>P s\<close> fst_conv init_T snd_conv final_I fst_whileLoop_cond_false)
|
||||
}
|
||||
thus ?thesis by (clarsimp simp: exs_valid_def Bex_def)
|
||||
qed
|
||||
|
||||
lemma empty_fail_whileLoop[empty_fail_cond, intro!, wp]:
|
||||
|
@ -618,7 +621,7 @@ lemma whileLoopE_liftE:
|
|||
apply (rule set_eqI, rule iffI)
|
||||
apply clarsimp
|
||||
apply (clarsimp simp: in_liftE whileLoop_def)
|
||||
\<comment> \<open>The schematic existential is instantiated by 'subst isr_Inr_proj' ... 'rule refl' in two lines\<close>
|
||||
\<comment> \<open>The schematic existential is instantiated by @{text "subst isr_Inr_projr ... rule refl"} in two lines\<close>
|
||||
apply (rule exI)
|
||||
apply (rule conjI)
|
||||
apply (subst isr_Inr_projr)
|
||||
|
@ -788,7 +791,7 @@ lemma whileLoopE_wp_inv [wp]:
|
|||
apply (rule validE_whileLoopE [where I=I], auto)
|
||||
done
|
||||
|
||||
subsection "Stronger whileLoop rules"
|
||||
subsection "Stronger @{const whileLoop} rules"
|
||||
|
||||
lemma whileLoop_rule_strong:
|
||||
assumes init_U: "\<lbrace> \<lambda>s'. s' = s \<rbrace> whileLoop C B r \<lbrace> \<lambda>r s. (r, s) \<in> fst Q \<rbrace>"
|
||||
|
@ -848,7 +851,7 @@ lemma snd_whileLoop_subset:
|
|||
done
|
||||
|
||||
|
||||
subsection "Some rules for whileM"
|
||||
subsection "Some rules for @{const whileM}"
|
||||
|
||||
lemma whileM_wp_gen:
|
||||
assumes termin:"\<And>s. I False s \<Longrightarrow> Q s"
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
* Option monad while loop formalisation.
|
||||
*)
|
||||
|
||||
chapter \<open>State Projections and Reader Option Monad\<close>
|
||||
|
||||
theory Reader_Option_Monad
|
||||
imports
|
||||
Monad_Lib
|
||||
|
@ -22,7 +24,7 @@ section \<open>Projections\<close>
|
|||
|
||||
type_synonym ('s,'a) lookup = "'s \<Rightarrow> 'a option"
|
||||
|
||||
text \<open>Similar to map_option but the second function returns option as well\<close>
|
||||
text \<open>Similar to @{const map_option} but the second function returns @{text option} as well\<close>
|
||||
definition opt_map :: "('s,'a) lookup \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> ('s,'b) lookup" (infixl "|>" 54) where
|
||||
"f |> g \<equiv> \<lambda>s. case f s of None \<Rightarrow> None | Some x \<Rightarrow> g x"
|
||||
|
||||
|
@ -241,7 +243,7 @@ abbreviation ogets :: "('s \<Rightarrow> 'a) \<Rightarrow> ('s, 'a) lookup" wher
|
|||
|
||||
text \<open>
|
||||
Integration with exception monad.
|
||||
Corresponding bindE would be analogous to lifting in Nondet_Monad.\<close>
|
||||
Corresponding bindE would be analogous to lifting in @{text Nondet_Monad}.\<close>
|
||||
|
||||
definition
|
||||
"oreturnOk x = K (Some (Inr x))"
|
||||
|
@ -250,10 +252,10 @@ definition
|
|||
"othrow e = K (Some (Inl e))"
|
||||
|
||||
definition
|
||||
"oguard G \<equiv> (\<lambda>s. if G s then Some () else None)"
|
||||
"oguard G \<equiv> \<lambda>s. if G s then Some () else None"
|
||||
|
||||
definition
|
||||
"ocondition c L R \<equiv> (\<lambda>s. if c s then L s else R s)"
|
||||
"ocondition c L R \<equiv> \<lambda>s. if c s then L s else R s"
|
||||
|
||||
definition
|
||||
"oskip \<equiv> oreturn ()"
|
||||
|
@ -266,26 +268,26 @@ subsection \<open>Monad laws\<close>
|
|||
|
||||
lemma oreturn_bind[simp]:
|
||||
"(oreturn x |>> f) = f x"
|
||||
by (auto simp add: oreturn_def obind_def K_def)
|
||||
by (auto simp add: oreturn_def obind_def)
|
||||
|
||||
lemma obind_return[simp]:
|
||||
"(m |>> oreturn) = m"
|
||||
by (auto simp add: oreturn_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: oreturn_def obind_def split: option.splits)
|
||||
|
||||
lemma obind_assoc:
|
||||
"(m |>> f) |>> g = m |>> (\<lambda>x. f x |>> g)"
|
||||
by (auto simp add: oreturn_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: oreturn_def obind_def split: option.splits)
|
||||
|
||||
|
||||
subsection \<open>Binding and fail\<close>
|
||||
|
||||
lemma obind_fail [simp]:
|
||||
"f |>> (\<lambda>_. ofail) = ofail"
|
||||
by (auto simp add: ofail_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: ofail_def obind_def split: option.splits)
|
||||
|
||||
lemma ofail_bind [simp]:
|
||||
"ofail |>> m = ofail"
|
||||
by (auto simp add: ofail_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: ofail_def obind_def split: option.splits)
|
||||
|
||||
|
||||
subsection \<open>Function package setup\<close>
|
||||
|
@ -348,11 +350,11 @@ lemma ocondition_True:
|
|||
|
||||
lemma in_oreturn [simp]:
|
||||
"(oreturn x s = Some v) = (v = x)"
|
||||
by (auto simp: oreturn_def K_def)
|
||||
by (auto simp: oreturn_def)
|
||||
|
||||
lemma oreturn_None[simp]:
|
||||
"\<not> oreturn x s = None"
|
||||
by (simp add: oreturn_def K_def)
|
||||
by (simp add: oreturn_def)
|
||||
|
||||
lemma oreturnE:
|
||||
"\<lbrakk>oreturn x s = Some v; v = x \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -360,7 +362,7 @@ lemma oreturnE:
|
|||
|
||||
lemma in_ofail[simp]:
|
||||
"ofail s \<noteq> Some v"
|
||||
by (auto simp: ofail_def K_def)
|
||||
by (auto simp: ofail_def)
|
||||
|
||||
lemma ofailE:
|
||||
"ofail s = Some v \<Longrightarrow> P"
|
||||
|
@ -408,7 +410,7 @@ lemma obindE:
|
|||
|
||||
lemma in_othrow_eq[simp]:
|
||||
"(othrow e s = Some v) = (v = Inl e)"
|
||||
by (auto simp: othrow_def K_def)
|
||||
by (auto simp: othrow_def)
|
||||
|
||||
lemma othrowE:
|
||||
"\<lbrakk>othrow e s = Some v; v = Inl e \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -416,7 +418,7 @@ lemma othrowE:
|
|||
|
||||
lemma in_oreturnOk_eq[simp]:
|
||||
"(oreturnOk x s = Some v) = (v = Inr x)"
|
||||
by (auto simp: oreturnOk_def K_def)
|
||||
by (auto simp: oreturnOk_def)
|
||||
|
||||
lemma oreturnOkE:
|
||||
"\<lbrakk>oreturnOk x s = Some v; v = Inr x \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -434,11 +436,11 @@ lemma in_opt_map_Some_None_eq[simp]:
|
|||
|
||||
lemma oreturn_comp[simp]:
|
||||
"oreturn x \<circ> f = oreturn x"
|
||||
by (simp add: oreturn_def K_def o_def)
|
||||
by (simp add: oreturn_def o_def)
|
||||
|
||||
lemma ofail_comp[simp]:
|
||||
"ofail \<circ> f = ofail"
|
||||
by (auto simp: ofail_def K_def)
|
||||
by (auto simp: ofail_def)
|
||||
|
||||
lemma oassert_comp[simp]:
|
||||
"oassert P \<circ> f = oassert P"
|
||||
|
@ -446,7 +448,7 @@ lemma oassert_comp[simp]:
|
|||
|
||||
lemma fail_apply[simp]:
|
||||
"ofail s = None"
|
||||
by (simp add: ofail_def K_def)
|
||||
by (simp add: ofail_def)
|
||||
|
||||
lemma oassert_apply[simp]:
|
||||
"oassert P s = (if P then Some () else None)"
|
||||
|
@ -474,7 +476,7 @@ lemma if_comp_dist:
|
|||
|
||||
lemma obindK_is_opt_map:
|
||||
"f \<bind> (\<lambda>x. K $ g x) = f |> g"
|
||||
by (simp add: obind_def opt_map_def K_def)
|
||||
by (simp add: obind_def opt_map_def)
|
||||
|
||||
|
||||
section \<open>"While" loops over option monad.\<close>
|
||||
|
|
|
@ -59,6 +59,10 @@ lemmas owhile_add_inv = owhile_inv_def[symmetric]
|
|||
|
||||
|
||||
(* WP rules for ovalid. *)
|
||||
lemma ovalid_post_taut[wp]:
|
||||
"\<lblot>P\<rblot> f \<lblot>\<top>\<top>\<rblot>"
|
||||
by (simp add: ovalid_def)
|
||||
|
||||
lemma ovalid_inv[wp]:
|
||||
"ovalid P f (\<lambda>_. P)"
|
||||
by (simp add: ovalid_def)
|
||||
|
@ -116,8 +120,8 @@ lemma owhile_ovalid[wp]:
|
|||
\<Longrightarrow> ovalid (I a) (owhile_inv C B a I M) Q"
|
||||
unfolding owhile_inv_def owhile_def ovalid_def
|
||||
apply clarify
|
||||
apply (frule_tac I = "\<lambda>a. I a s" in option_while_rule)
|
||||
apply auto
|
||||
apply (frule (1) option_while_rule[where I = "\<lambda>a. I a s" for s])
|
||||
apply auto
|
||||
done
|
||||
|
||||
definition ovalid_property where "ovalid_property P x = (\<lambda>s f. (\<forall>r. Some r = x s f \<longrightarrow> P r s))"
|
||||
|
@ -187,7 +191,7 @@ lemma owhile_NF[wp]:
|
|||
\<Longrightarrow> ovalidNF (I a) (owhile_inv C B a I M) Q"
|
||||
unfolding owhile_inv_def ovalidNF_def ovalid_def
|
||||
apply clarify
|
||||
apply (rule_tac I = I and M = "measure (\<lambda>r. M r s)" in owhile_rule)
|
||||
apply (rule owhile_rule[where I = I and M = "measure (\<lambda>r. M r s)" and s = s for s])
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
|
@ -247,7 +251,7 @@ lemma no_ofail_ogets[wp]:
|
|||
by simp
|
||||
|
||||
lemma no_ofail_obind[wp]:
|
||||
"\<lbrakk> \<And>r. no_ofail (P r) (g r); no_ofail Q f; ovalid Q f P \<rbrakk> \<Longrightarrow> no_ofail Q (obind f g)"
|
||||
"\<lbrakk> \<And>r. no_ofail (R r) (g r); \<lblot>Q\<rblot> f \<lblot>R\<rblot>; no_ofail P f \<rbrakk> \<Longrightarrow> no_ofail (P and Q) (f |>> g)"
|
||||
by (auto simp: no_ofail_def obind_def ovalid_def)
|
||||
|
||||
lemma no_ofail_K_bind[wp]:
|
||||
|
@ -275,6 +279,11 @@ lemma no_ofail_oassert_opt[simp, wp]:
|
|||
"no_ofail (\<lambda>_. P \<noteq> None) (oassert_opt P)"
|
||||
by (simp add: no_ofail_def oassert_opt_def split: option.splits)
|
||||
|
||||
lemma no_ofail_if[wp]:
|
||||
"\<lbrakk> P \<Longrightarrow> no_ofail Q f; \<not> P \<Longrightarrow> no_ofail R g \<rbrakk>
|
||||
\<Longrightarrow> no_ofail (if P then Q else R) (if P then f else g)"
|
||||
by simp
|
||||
|
||||
lemma no_ofail_owhen[wp]:
|
||||
"(P \<Longrightarrow> no_ofail Q f) \<Longrightarrow> no_ofail (if P then Q else \<top>) (owhen P f)"
|
||||
by (simp add: no_ofail_def owhen_def)
|
||||
|
@ -287,11 +296,14 @@ lemma no_ofail_oassert[simp, wp]:
|
|||
"no_ofail (\<lambda>_. P) (oassert P)"
|
||||
by (simp add: oassert_def no_ofail_def)
|
||||
|
||||
lemma no_ofail_gets_the:
|
||||
"no_ofail P f \<Longrightarrow> no_fail P (gets_the (f :: ('s, 'a) lookup))"
|
||||
by (fastforce simp: no_ofail_def no_fail_def gets_the_def gets_def
|
||||
get_def assert_opt_def bind_def return_def fail_def
|
||||
split: option.split)
|
||||
lemma no_ofail_gets_the_eq:
|
||||
"no_ofail P f \<longleftrightarrow> no_fail P (gets_the (f :: ('s, 'a) lookup))"
|
||||
by (auto simp: no_ofail_def no_fail_def gets_the_def gets_def
|
||||
get_def assert_opt_def bind_def return_def fail_def
|
||||
split: option.split)
|
||||
|
||||
lemmas no_ofail_gets_the =
|
||||
no_ofail_gets_the_eq[THEN iffD1]
|
||||
|
||||
lemma no_ofail_is_triple[wp_trip]:
|
||||
"no_ofail P f = triple_judgement P f (\<lambda>s f. f s \<noteq> None)"
|
||||
|
@ -348,7 +360,7 @@ lemma ovalidNF_pre_imp:
|
|||
by (simp add: ovalidNF_def)
|
||||
|
||||
lemma no_ofail_pre_imp:
|
||||
"\<lbrakk> \<And>s. P' s \<Longrightarrow> P s; no_ofail P f \<rbrakk> \<Longrightarrow> no_ofail P' f"
|
||||
"\<lbrakk> no_ofail P f; \<And>s. P' s \<Longrightarrow> P s \<rbrakk> \<Longrightarrow> no_ofail P' f"
|
||||
by (simp add: no_ofail_def)
|
||||
|
||||
lemma ovalid_post_imp:
|
||||
|
|
|
@ -55,10 +55,8 @@ lemma det_UN:
|
|||
lemma bind_detI[simp, intro!]:
|
||||
"\<lbrakk> det f; \<forall>x. det (g x) \<rbrakk> \<Longrightarrow> det (f >>= g)"
|
||||
unfolding bind_def det_def
|
||||
apply clarsimp
|
||||
apply (erule_tac x=s in allE)
|
||||
apply clarsimp
|
||||
done
|
||||
apply (erule all_reg[rotated])
|
||||
by clarsimp
|
||||
|
||||
lemma det_modify[iff]:
|
||||
"det (modify f)"
|
||||
|
|
|
@ -14,9 +14,9 @@ begin
|
|||
section \<open>Monads that are wellformed w.r.t. failure\<close>
|
||||
|
||||
text \<open>
|
||||
Usually, well-formed monads constructed from the primitives in Trace_Monad will have the following
|
||||
property: if they return an empty set of completed results, there exists a trace corresponding to
|
||||
a failed result.\<close>
|
||||
Usually, well-formed monads constructed from the primitives in @{text Trace_Monad} will have the
|
||||
following property: if they return an empty set of completed results, there exists a trace
|
||||
corresponding to a failed result.\<close>
|
||||
definition empty_fail :: "('s,'a) tmonad \<Rightarrow> bool" where
|
||||
"empty_fail m \<equiv> \<forall>s. mres (m s) = {} \<longrightarrow> Failed \<in> snd ` (m s)"
|
||||
|
||||
|
@ -58,8 +58,7 @@ lemma empty_failD3:
|
|||
lemma empty_fail_bindD1:
|
||||
"empty_fail (a >>= b) \<Longrightarrow> empty_fail a"
|
||||
unfolding empty_fail_def bind_def
|
||||
apply clarsimp
|
||||
apply (drule_tac x=s in spec)
|
||||
apply (erule all_reg[rotated])
|
||||
by (force simp: split_def mres_def vimage_def split: tmres.splits)
|
||||
|
||||
|
||||
|
@ -112,10 +111,7 @@ lemma empty_fail_select[empty_fail_cond]:
|
|||
lemma mres_bind_empty:
|
||||
"mres ((f >>= g) s) = {}
|
||||
\<Longrightarrow> mres (f s) = {} \<or> (\<forall>res\<in>mres (f s). mres (g (fst res) (snd res)) = {})"
|
||||
apply clarsimp
|
||||
apply (clarsimp simp: mres_def split_def vimage_def bind_def)
|
||||
apply (rename_tac rv s' trace)
|
||||
apply (drule_tac x=rv in spec, drule_tac x=s' in spec)
|
||||
apply (clarsimp simp: bind_def mres_def split_def)
|
||||
apply (clarsimp simp: image_def)
|
||||
apply fastforce
|
||||
done
|
||||
|
|
|
@ -204,8 +204,8 @@ lemma liftE_liftM:
|
|||
lemma liftME_liftM:
|
||||
"liftME f = liftM (case_sum Inl (Inr \<circ> f))"
|
||||
unfolding liftME_def liftM_def bindE_def returnOk_def lift_def
|
||||
apply (rule ext, rename_tac x)
|
||||
apply (rule_tac f="bind x" in arg_cong)
|
||||
apply (rule ext)
|
||||
apply (rule arg_cong[where f="bind m" for m])
|
||||
apply (fastforce simp: throwError_def split: sum.splits)
|
||||
done
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ abbreviation map_tmres_rv :: "('a \<Rightarrow> 'b) \<Rightarrow> ('s, 'a) tmres
|
|||
|
||||
text \<open>
|
||||
tmonad returns a set of non-deterministic computations, including
|
||||
a trace as a list of "thread identifier" \<times> state, and an optional
|
||||
a trace as a list of @{text "thread identifier \<times> state"}, and an optional
|
||||
pair of result and state when the computation did not fail.\<close>
|
||||
type_synonym ('s, 'a) tmonad = "'s \<Rightarrow> ((tmid \<times> 's) list \<times> ('s, 'a) tmres) set"
|
||||
|
||||
|
@ -713,7 +713,7 @@ definition whileLoop ::
|
|||
notation (output)
|
||||
whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000)
|
||||
|
||||
\<comment> \<open>FIXME: why does this differ to Nondet_Monad?\<close>
|
||||
\<comment> \<open>FIXME: why does this differ to @{text Nondet_Monad}?\<close>
|
||||
definition whileLoopT ::
|
||||
"('r \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> ('r \<Rightarrow> ('s, 'r) tmonad) \<Rightarrow> 'r \<Rightarrow> ('s, 'r) tmonad"
|
||||
where
|
||||
|
@ -795,7 +795,7 @@ definition parallel :: "('s,'a) tmonad \<Rightarrow> ('s,'a) tmonad \<Rightarrow
|
|||
\<and> (map (\<lambda>(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \<in> g s})"
|
||||
|
||||
abbreviation(input)
|
||||
"parallel_mrg \<equiv> ((\<lambda>((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)))"
|
||||
"parallel_mrg \<equiv> \<lambda>((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)"
|
||||
|
||||
lemma parallel_def2:
|
||||
"parallel f g = (\<lambda>s. {(xs, rv). \<exists>ys zs. (ys, rv) \<in> f s \<and> (zs, rv) \<in> g s
|
||||
|
@ -806,6 +806,7 @@ lemma parallel_def2:
|
|||
apply (rule exI, rule conjI, assumption)+
|
||||
apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff)
|
||||
apply clarsimp
|
||||
apply (rename_tac ys zs)
|
||||
apply (rule_tac x="map (((\<noteq>) Env) o fst) ys" in exI)
|
||||
apply (simp add: zip_map1 o_def split_def)
|
||||
apply (strengthen subst[where P="\<lambda>xs. (xs, v) \<in> S" for v S, mk_strg I _ E])
|
||||
|
|
|
@ -117,35 +117,22 @@ lemma gets_the_returns:
|
|||
by (simp_all add: returnOk_def throwError_def
|
||||
gets_the_return)
|
||||
|
||||
lemma all_rv_choice_fn_eq_pred:
|
||||
"\<lbrakk> \<And>rv. P rv \<Longrightarrow> \<exists>fn. f rv = g fn \<rbrakk> \<Longrightarrow> \<exists>fn. \<forall>rv. P rv \<longrightarrow> f rv = g (fn rv)"
|
||||
apply (rule_tac x="\<lambda>rv. SOME h. f rv = g h" in exI)
|
||||
apply (clarsimp split: if_split)
|
||||
by (meson someI_ex)
|
||||
|
||||
lemma all_rv_choice_fn_eq:
|
||||
"\<lbrakk> \<And>rv. \<exists>fn. f rv = g fn \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. f = (\<lambda>rv. g (fn rv))"
|
||||
using all_rv_choice_fn_eq_pred[where f=f and g=g and P=\<top>]
|
||||
by (simp add: fun_eq_iff)
|
||||
|
||||
lemma gets_the_eq_bind:
|
||||
"\<lbrakk> \<exists>fn. f = gets_the (fn o fn'); \<And>rv. \<exists>fn. g rv = gets_the (fn o fn') \<rbrakk>
|
||||
"\<lbrakk> f = gets_the (fn_f o fn'); \<And>rv. g rv = gets_the (fn_g rv o fn') \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. (f >>= g) = gets_the (fn o fn')"
|
||||
apply (clarsimp dest!: all_rv_choice_fn_eq)
|
||||
apply (rule_tac x="\<lambda>s. case (fn s) of None \<Rightarrow> None | Some v \<Rightarrow> fna v s" in exI)
|
||||
apply clarsimp
|
||||
apply (rule exI[where x="\<lambda>s. case (fn_f s) of None \<Rightarrow> None | Some v \<Rightarrow> fn_g v s"])
|
||||
apply (simp add: gets_the_def bind_assoc exec_gets
|
||||
assert_opt_def fun_eq_iff
|
||||
split: option.split)
|
||||
done
|
||||
|
||||
lemma gets_the_eq_bindE:
|
||||
"\<lbrakk> \<exists>fn. f = gets_the (fn o fn'); \<And>rv. \<exists>fn. g rv = gets_the (fn o fn') \<rbrakk>
|
||||
"\<lbrakk> f = gets_the (fn_f o fn'); \<And>rv. g rv = gets_the (fn_g rv o fn') \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. (f >>=E g) = gets_the (fn o fn')"
|
||||
apply (simp add: bindE_def)
|
||||
apply (erule gets_the_eq_bind)
|
||||
unfolding bindE_def
|
||||
apply (erule gets_the_eq_bind[where fn_g="\<lambda>rv s. case rv of Inl e \<Rightarrow> Some (Inl e) | Inr v \<Rightarrow> fn_g v s"])
|
||||
apply (simp add: lift_def gets_the_returns split: sum.split)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma gets_the_fail:
|
||||
|
@ -376,7 +363,7 @@ lemma select_empty_bind[simp]:
|
|||
by (simp add: select_def bind_def)
|
||||
|
||||
|
||||
subsection \<open>Alternative env_steps with repeat\<close>
|
||||
subsection \<open>Alternative @{text env_steps} with repeat\<close>
|
||||
|
||||
lemma mapM_Cons:
|
||||
"mapM f (x # xs) = do
|
||||
|
@ -531,7 +518,7 @@ lemma put_trace_mapM_x:
|
|||
|
||||
lemma rev_surj:
|
||||
"surj rev"
|
||||
by (rule_tac f=rev in surjI, simp)
|
||||
by (rule surjI[where f=rev], simp)
|
||||
|
||||
lemma select_image:
|
||||
"select (f ` S) = do x \<leftarrow> select S; return (f x) od"
|
||||
|
|
|
@ -137,8 +137,8 @@ lemma hoare_split_bind_case_sum:
|
|||
"\<And>rv. \<lbrace>S rv\<rbrace> h rv \<lbrace>Q\<rbrace>"
|
||||
assumes y: "\<lbrace>P\<rbrace> f \<lbrace>S\<rbrace>,\<lbrace>R\<rbrace>"
|
||||
shows "\<lbrace>P\<rbrace> f >>= case_sum g h \<lbrace>Q\<rbrace>"
|
||||
apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]])
|
||||
apply (case_tac x, simp_all add: x)
|
||||
apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]])
|
||||
apply (wpsimp wp: x split: sum.splits)
|
||||
done
|
||||
|
||||
lemma hoare_split_bind_case_sumE:
|
||||
|
@ -147,8 +147,8 @@ lemma hoare_split_bind_case_sumE:
|
|||
assumes y: "\<lbrace>P\<rbrace> f \<lbrace>S\<rbrace>,\<lbrace>R\<rbrace>"
|
||||
shows "\<lbrace>P\<rbrace> f >>= case_sum g h \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
|
||||
apply (unfold validE_def)
|
||||
apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]])
|
||||
apply (case_tac x, simp_all add: x [unfolded validE_def])
|
||||
apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]])
|
||||
apply (wpsimp wp: x[unfolded validE_def] split: sum.splits)
|
||||
done
|
||||
|
||||
lemma assertE_sp:
|
||||
|
@ -256,12 +256,11 @@ lemma doesn't_grow_proof:
|
|||
assumes x: "\<And>x. \<lbrace>\<lambda>s. x \<notin> S s \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. x \<notin> S s\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. card (S s) < n \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. card (S s) < n\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b \<subseteq> S s")
|
||||
apply (drule card_mono [OF y], simp)
|
||||
apply (erule le_less_trans[rotated])
|
||||
apply (rule card_mono[OF y])
|
||||
apply clarsimp
|
||||
apply (rule ccontr)
|
||||
apply (subgoal_tac "x \<notin> S b", simp)
|
||||
apply (erule use_valid [OF _ x])
|
||||
apply (drule (2) use_valid[OF _ x, OF _ conjI])
|
||||
apply simp
|
||||
done
|
||||
|
||||
|
@ -297,13 +296,12 @@ lemma shrinks_proof:
|
|||
assumes w: "\<And>s. P s \<Longrightarrow> x \<in> S s"
|
||||
shows "\<lbrace>\<lambda>s. card (S s) \<le> n \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. card (S s) < n\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b \<subset> S s")
|
||||
apply (drule psubset_card_mono [OF y], simp)
|
||||
apply (erule less_le_trans[rotated])
|
||||
apply (rule psubset_card_mono[OF y])
|
||||
apply (rule psubsetI)
|
||||
apply clarsimp
|
||||
apply (rule ccontr)
|
||||
apply (subgoal_tac "x \<notin> S b", simp)
|
||||
apply (erule use_valid [OF _ x])
|
||||
apply (drule (2) use_valid[OF _ x, OF _ conjI])
|
||||
apply simp
|
||||
by (metis use_valid w z)
|
||||
|
||||
|
@ -393,13 +391,12 @@ lemma P_bool_lift:
|
|||
assumes f: "\<lbrace>\<lambda>s. \<not>Q s\<rbrace> f \<lbrace>\<lambda>r s. \<not>Q s\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. P (Q s)\<rbrace> f \<lbrace>\<lambda>r s. P (Q s)\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "Q b = Q s")
|
||||
apply simp
|
||||
apply (rule back_subst[where P=P], assumption)
|
||||
apply (rule iffI)
|
||||
apply (rule classical)
|
||||
apply (drule (1) use_valid [OF _ f])
|
||||
apply simp
|
||||
apply (erule (1) use_valid [OF _ t])
|
||||
apply (erule (1) use_valid [OF _ t])
|
||||
apply (rule classical)
|
||||
apply (drule (1) use_valid [OF _ f])
|
||||
apply simp
|
||||
done
|
||||
|
||||
lemmas fail_inv = hoare_fail_any[where Q="\<lambda>_. P" and P=P for P]
|
||||
|
@ -416,11 +413,10 @@ lemma hoare_Ball_helper:
|
|||
assumes y: "\<And>P. \<lbrace>\<lambda>s. P (S s)\<rbrace> f \<lbrace>\<lambda>rv s. P (S s)\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. \<forall>x \<in> S s. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x \<in> S s. Q x rv s\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b = S s")
|
||||
apply (erule post_by_hoare2 [OF x])
|
||||
apply (clarsimp simp: Ball_def)
|
||||
apply (erule_tac P1="\<lambda>x. x = S s" in post_by_hoare2 [OF y])
|
||||
apply (rule refl)
|
||||
apply (drule bspec, erule back_subst[where P="\<lambda>A. x\<in>A" for x])
|
||||
apply (erule post_by_hoare[OF y, rotated])
|
||||
apply (rule refl)
|
||||
apply (erule (1) post_by_hoare[OF x])
|
||||
done
|
||||
|
||||
lemma handy_prop_divs:
|
||||
|
@ -479,14 +475,14 @@ lemma hoare_in_monad_post:
|
|||
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>x. P\<rbrace>"
|
||||
shows "\<lbrace>\<top>\<rbrace> f \<lbrace>\<lambda>rv s. (rv, s) \<in> mres (f s)\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "s = b", simp)
|
||||
apply (simp add: state_unchanged [OF x])
|
||||
apply (rule back_subst[where P="\<lambda>s. x\<in>mres (f s)" for x], assumption)
|
||||
apply (simp add: state_unchanged[OF x])
|
||||
done
|
||||
|
||||
lemma list_case_throw_validE_R:
|
||||
"\<lbrakk> \<And>y ys. xs = y # ys \<Longrightarrow> \<lbrace>P\<rbrace> f y ys \<lbrace>Q\<rbrace>,- \<rbrakk> \<Longrightarrow>
|
||||
\<lbrace>P\<rbrace> case xs of [] \<Rightarrow> throwError e | x # xs \<Rightarrow> f x xs \<lbrace>Q\<rbrace>,-"
|
||||
apply (case_tac xs, simp_all)
|
||||
apply (cases xs, simp_all)
|
||||
apply wp
|
||||
done
|
||||
|
||||
|
@ -522,13 +518,13 @@ lemma wp_split_const_if:
|
|||
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. (G \<longrightarrow> P s) \<and> (\<not> G \<longrightarrow> P' s)\<rbrace> f \<lbrace>\<lambda>rv s. (G \<longrightarrow> Q rv s) \<and> (\<not> G \<longrightarrow> Q' rv s)\<rbrace>"
|
||||
by (case_tac G, simp_all add: x y)
|
||||
by (cases G, simp_all add: x y)
|
||||
|
||||
lemma wp_split_const_if_R:
|
||||
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
|
||||
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>,-"
|
||||
shows "\<lbrace>\<lambda>s. (G \<longrightarrow> P s) \<and> (\<not> G \<longrightarrow> P' s)\<rbrace> f \<lbrace>\<lambda>rv s. (G \<longrightarrow> Q rv s) \<and> (\<not> G \<longrightarrow> Q' rv s)\<rbrace>,-"
|
||||
by (case_tac G, simp_all add: x y)
|
||||
by (cases G, simp_all add: x y)
|
||||
|
||||
lemma hoare_disj_division:
|
||||
"\<lbrakk> P \<or> Q; P \<Longrightarrow> \<lbrace>R\<rbrace> f \<lbrace>S\<rbrace>; Q \<Longrightarrow> \<lbrace>T\<rbrace> f \<lbrace>S\<rbrace> \<rbrakk>
|
||||
|
@ -589,11 +585,12 @@ lemma univ_wp:
|
|||
lemma univ_get_wp:
|
||||
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. P\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. \<forall>(rv, s') \<in> mres (f s). s = s' \<longrightarrow> Q rv s'\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
apply (rule hoare_pre_imp [OF _ univ_wp])
|
||||
apply (rule hoare_pre_imp[OF _ univ_wp])
|
||||
apply clarsimp
|
||||
apply (drule bspec, assumption, simp)
|
||||
apply (subgoal_tac "s = b", simp)
|
||||
apply (simp add: state_unchanged [OF x])
|
||||
apply (drule mp)
|
||||
apply (simp add: state_unchanged[OF x])
|
||||
apply simp
|
||||
done
|
||||
|
||||
lemma other_hoare_in_monad_post:
|
||||
|
@ -630,10 +627,10 @@ lemma bindE_split_recursive_asm:
|
|||
apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def)
|
||||
apply (erule allE, erule(1) impE)
|
||||
apply (drule(1) bspec, simp)
|
||||
apply (case_tac x, simp_all add: in_throwError)
|
||||
apply (clarsimp simp: in_throwError split: sum.splits)
|
||||
apply (drule x)
|
||||
apply (clarsimp simp: validE_def valid_def)
|
||||
apply (drule(1) bspec, simp)
|
||||
apply (drule(1) bspec, simp split: sum.splits)
|
||||
done
|
||||
|
||||
lemma validE_R_abstract_rv:
|
||||
|
@ -687,9 +684,8 @@ lemma valid_rv_split:
|
|||
lemma hoare_rv_split:
|
||||
"\<lbrakk>\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. rv \<longrightarrow> (Q rv s)\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. (\<not>rv) \<longrightarrow> (Q rv s)\<rbrace>\<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (case_tac a, fastforce+)
|
||||
done
|
||||
apply (clarsimp simp: valid_def split_def)
|
||||
by (metis (full_types) fst_eqD snd_conv)
|
||||
|
||||
lemma combine_validE:
|
||||
"\<lbrakk> \<lbrace> P \<rbrace> x \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>; \<lbrace> P' \<rbrace> x \<lbrace> Q' \<rbrace>,\<lbrace> E' \<rbrace> \<rbrakk>
|
||||
|
@ -718,23 +714,19 @@ lemma validE_pre_satisfies_post:
|
|||
|
||||
lemma hoare_validE_R_conjI:
|
||||
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, - ; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>, - \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>, -"
|
||||
apply (clarsimp simp: Ball_def validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: Ball_def validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma hoare_validE_E_conjI:
|
||||
"\<lbrakk> \<lbrace>P\<rbrace> f -, \<lbrace>Q\<rbrace> ; \<lbrace>P\<rbrace> f -, \<lbrace>Q'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f -, \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>"
|
||||
apply (clarsimp simp: Ball_def validE_E_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: Ball_def validE_E_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma validE_R_post_conjD1:
|
||||
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
|
||||
apply (clarsimp simp: validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma validE_R_post_conjD2:
|
||||
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,-"
|
||||
apply (clarsimp simp: validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma throw_opt_wp[wp]:
|
||||
"\<lbrace>if v = None then E ex else Q (the v)\<rbrace> throw_opt ex v \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
|
||||
|
|
|
@ -144,7 +144,7 @@ lemma no_fail_assume_pre:
|
|||
"(\<And>s. P s \<Longrightarrow> no_fail P f) \<Longrightarrow> no_fail P f"
|
||||
by (simp add: no_fail_def)
|
||||
|
||||
\<comment> \<open>lemma no_fail_liftM_eq[simp]:
|
||||
\<^cancel>\<open>lemma no_fail_liftM_eq[simp]:
|
||||
"no_fail P (liftM f m) = no_fail P m"
|
||||
by (auto simp: liftM_def no_fail_def bind_def return_def)\<close>
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ lemma no_throw_def':
|
|||
by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits)
|
||||
|
||||
|
||||
subsection \<open>no_throw rules\<close>
|
||||
subsection \<open>@{const no_throw} rules\<close>
|
||||
|
||||
lemma no_throw_returnOk[simp]:
|
||||
"no_throw P (returnOk a)"
|
||||
|
|
|
@ -17,7 +17,7 @@ section \<open>Rely-Guarantee Logic\<close>
|
|||
subsection \<open>Validity\<close>
|
||||
|
||||
text \<open>
|
||||
This section defines a Rely_Guarantee logic for partial correctness for
|
||||
This section defines a Rely-Guarantee logic for partial correctness for
|
||||
the interference trace monad.
|
||||
|
||||
The logic is defined semantically. Rules work directly on the
|
||||
|
@ -36,7 +36,7 @@ text \<open>
|
|||
trace and no successful results then the quintuple is trivially valid. This
|
||||
means @{term "assert P"} does not require us to prove that @{term P} holds,
|
||||
but rather allows us to assume @{term P}! Proving non-failure is done via a
|
||||
separate predicate and calculus (see Trace_No_Fail).\<close>
|
||||
separate predicate and calculus (see @{text Trace_No_Fail}).\<close>
|
||||
|
||||
primrec trace_steps :: "(tmid \<times> 's) list \<Rightarrow> 's \<Rightarrow> (tmid \<times> 's \<times> 's) set" where
|
||||
"trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \<union> trace_steps trace (snd elem)"
|
||||
|
@ -56,7 +56,7 @@ next
|
|||
done
|
||||
qed
|
||||
|
||||
text \<open>rg_pred type: Rely-Guaranty predicates (state before => state after => bool)\<close>
|
||||
text \<open>@{text rg_pred} type: Rely-Guaranty predicates (@{text "state before => state after => bool"})\<close>
|
||||
type_synonym 's rg_pred = "'s \<Rightarrow> 's \<Rightarrow> bool"
|
||||
|
||||
text \<open>Abbreviations for trivial postconditions (taking three arguments):\<close>
|
||||
|
@ -272,8 +272,8 @@ proof (induct n arbitrary: res)
|
|||
case 0 then show ?case by auto
|
||||
next
|
||||
case (Suc n)
|
||||
have drop_1: "\<And>tr res. (tr, res) \<in> f s \<Longrightarrow> \<exists>res'. (drop 1 tr, res') \<in> f s"
|
||||
by (case_tac tr; fastforce dest: prefix_closedD[rotated] intro: Suc)
|
||||
have drop_1: "(tr, res) \<in> f s \<Longrightarrow> \<exists>res'. (drop 1 tr, res') \<in> f s" for tr res
|
||||
by (cases tr; fastforce dest: prefix_closedD[rotated] intro: Suc)
|
||||
show ?case
|
||||
using Suc.hyps[OF Suc.prems]
|
||||
by (metis drop_1[simplified] drop_drop add_0 add_Suc)
|
||||
|
@ -291,7 +291,8 @@ lemma parallel_prefix_closed[wp_split]:
|
|||
"\<lbrakk>prefix_closed f; prefix_closed g\<rbrakk>
|
||||
\<Longrightarrow> prefix_closed (parallel f g)"
|
||||
apply (subst prefix_closed_def, clarsimp simp: parallel_def)
|
||||
apply (case_tac f_steps; clarsimp)
|
||||
apply (subst (asm) zip.zip_Cons)
|
||||
apply (clarsimp split: list.splits)
|
||||
apply (drule(1) prefix_closedD)+
|
||||
apply fastforce
|
||||
done
|
||||
|
@ -338,7 +339,7 @@ lemma guar_cond_drop_Suc:
|
|||
"\<lbrakk>guar_cond R s0 (drop (Suc n) xs);
|
||||
fst (xs ! n) \<noteq> Env \<longrightarrow> R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))\<rbrakk>
|
||||
\<Longrightarrow> guar_cond R s0 (drop n xs)"
|
||||
by (case_tac "n < length xs"; simp add: guar_cond_drop_Suc_eq)
|
||||
by (cases "n < length xs"; simp add: guar_cond_drop_Suc_eq)
|
||||
|
||||
lemma rely_cond_Cons_eq:
|
||||
"rely_cond R s0 (x # xs)
|
||||
|
@ -427,8 +428,9 @@ proof -
|
|||
hd_drop_conv_nth hd_append)
|
||||
apply (fastforce simp: split_def intro!: nth_equalityI)
|
||||
apply clarsimp
|
||||
apply (erule_tac x=n in meta_allE)+
|
||||
apply (drule meta_mp, erule rely_cond_is_drop, simp)
|
||||
apply clarsimp
|
||||
apply (erule meta_allE, drule meta_mp, assumption)+
|
||||
apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp)
|
||||
apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\<lambda>x. x = Env"]
|
||||
split_def)
|
||||
|
@ -493,7 +495,7 @@ lemma put_trace_res:
|
|||
\<Longrightarrow> \<exists>n. tr = drop n xs \<and> n \<le> length xs
|
||||
\<and> res = (case n of 0 \<Rightarrow> Result ((), s) | _ \<Rightarrow> Incomplete)"
|
||||
apply (clarsimp simp: put_trace_eq_drop)
|
||||
apply (case_tac n; auto intro: exI[where x=0])
|
||||
apply (auto simp: gr0_conv_Suc intro: exI[where x=0])
|
||||
done
|
||||
|
||||
lemma put_trace_twp[wp]:
|
||||
|
@ -732,13 +734,16 @@ lemmas modify_prefix_closed[simp] =
|
|||
modify_wp[THEN valid_validI_wp[OF no_trace_all(3)], THEN validI_prefix_closed]
|
||||
lemmas await_prefix_closed[simp] = Await_sync_twp[THEN validI_prefix_closed]
|
||||
|
||||
lemma repeat_n_prefix_closed[intro!]:
|
||||
"prefix_closed f \<Longrightarrow> prefix_closed (repeat_n n f)"
|
||||
apply (induct n; simp)
|
||||
apply (auto intro: prefix_closed_bind)
|
||||
done
|
||||
|
||||
lemma repeat_prefix_closed[intro!]:
|
||||
"prefix_closed f \<Longrightarrow> prefix_closed (repeat f)"
|
||||
apply (simp add: repeat_def)
|
||||
apply (rule prefix_closed_bind; clarsimp)
|
||||
apply (rename_tac n)
|
||||
apply (induct_tac n; simp)
|
||||
apply (auto intro: prefix_closed_bind)
|
||||
done
|
||||
|
||||
lemma rely_cond_True[simp]:
|
||||
|
|
|
@ -11,7 +11,7 @@ theory Trace_Total
|
|||
imports Trace_No_Fail
|
||||
begin
|
||||
|
||||
section \<open>Total correctness for Trace_Monad and Trace_Monad with exceptions\<close>
|
||||
section \<open>Total correctness for @{text Trace_Monad} and @{text Trace_Monad} with exceptions\<close>
|
||||
|
||||
subsection Definitions
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ text \<open>
|
|||
computation is the empty set then the triple is trivially valid. This means
|
||||
@{term "assert P"} does not require us to prove that @{term P} holds, but
|
||||
rather allows us to assume @{term P}! Proving non-failure is done via a
|
||||
separate predicate and calculus (see Trace_No_Fail).\<close>
|
||||
separate predicate and calculus (see @{text Trace_No_Fail}).\<close>
|
||||
definition valid ::
|
||||
"('s \<Rightarrow> bool) \<Rightarrow> ('s,'a) tmonad \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> bool"
|
||||
("\<lbrace>_\<rbrace>/ _ /\<lbrace>_\<rbrace>") where
|
||||
|
@ -1016,7 +1016,7 @@ lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF v
|
|||
lemma assert_opt_wp:
|
||||
"\<lbrace>\<lambda>s. x \<noteq> None \<longrightarrow> Q (the x) s\<rbrace> assert_opt x \<lbrace>Q\<rbrace>"
|
||||
unfolding assert_opt_def
|
||||
by (case_tac x; wpsimp wp: fail_wp return_wp)
|
||||
by (cases x; wpsimp wp: fail_wp return_wp)
|
||||
|
||||
lemma gets_the_wp:
|
||||
"\<lbrace>\<lambda>s. (f s \<noteq> None) \<longrightarrow> Q (the (f s)) s\<rbrace> gets_the f \<lbrace>Q\<rbrace>"
|
||||
|
|
|
@ -26,7 +26,7 @@ text \<open>
|
|||
schematic does not have as parameters.
|
||||
|
||||
In the "constructor expression" case, we let users supply additional
|
||||
constructor handlers via the `datatype_schematic` attribute. The method uses
|
||||
constructor handlers via the @{text "datatype_schematic"} attribute. The method uses
|
||||
rules of the following form:
|
||||
|
||||
@{term "\<And>x1 x2 x3. getter (constructor x1 x2 x3) = x2"}
|
||||
|
@ -69,18 +69,18 @@ structure Datatype_Schematic_Data = Generic_Data
|
|||
(
|
||||
\<comment> \<open>
|
||||
Keys are names of datatype constructors (like @{const Cons}), values are
|
||||
`(index, function_name, thm)`.
|
||||
@{text "(index, function_name, thm)"}.
|
||||
|
||||
- `function_name` is the name of an "accessor" function that accesses part
|
||||
- @{text function_name} is the name of an "accessor" function that accesses part
|
||||
of the constructor specified by the key (so the accessor @{const hd} is
|
||||
related to the constructor/key @{const Cons}).
|
||||
|
||||
- `thm` is a theorem showing that the function accesses one of the
|
||||
- @{text thm} is a theorem showing that the function accesses one of the
|
||||
arguments to the constructor (like @{thm list.sel(1)}).
|
||||
|
||||
- `idx` is the index of the constructor argument that the accessor
|
||||
accesses. (eg. since `hd` accesses the first argument, `idx = 0`; since
|
||||
`tl` accesses the second argument, `idx = 1`).
|
||||
- @{text idx} is the index of the constructor argument that the accessor
|
||||
accesses. (eg. since @{const hd} accesses the first argument, @{text "idx = 0"}; since
|
||||
@{const tl} accesses the second argument, @{text "idx = 1"}).
|
||||
\<close>
|
||||
type T = ((int * string * thm) list) Symtab.table;
|
||||
val empty = Symtab.empty;
|
||||
|
@ -287,7 +287,7 @@ lemma selectively_exposing_datatype_arugments:
|
|||
notes get_basic_0.simps[datatype_schematic]
|
||||
shows "\<exists>x. \<forall>a b. x (basic a b) = a"
|
||||
apply (rule exI, (rule allI)+)
|
||||
apply datatype_schem \<comment> \<open>Only exposes `a` to the schematic.\<close>
|
||||
apply datatype_schem \<comment> \<open>Only exposes @{text a} to the schematic.\<close>
|
||||
by (rule refl)
|
||||
|
||||
lemma method_handles_primrecs_with_two_constructors:
|
||||
|
|
|
@ -17,10 +17,10 @@ begin
|
|||
|
||||
|
||||
text \<open>
|
||||
Methods for manipulating the post conditions of hoare triples as if they
|
||||
Methods for manipulating the post conditions of Hoare triples as if they
|
||||
were proper subgoals.
|
||||
|
||||
post_asm can be used with the @ attribute to modify existing proofs. Useful
|
||||
@{text post_asm} can be used with the \@ attribute to modify existing proofs. Useful
|
||||
for proving large postconditions in one proof and then subsequently decomposing it.
|
||||
|
||||
\<close>
|
||||
|
@ -98,8 +98,8 @@ end
|
|||
|
||||
|
||||
text \<open>
|
||||
Method (meant to be used with @ as an attribute) used for producing multiple facts out of
|
||||
a single hoare triple with a conjunction in its post condition.
|
||||
Method (meant to be used with \@ as an attribute) used for producing multiple facts out of
|
||||
a single Hoare triple with a conjunction in its post condition.
|
||||
\<close>
|
||||
|
||||
context begin
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
section \<open>Weakest Preconditions\<close>
|
||||
|
||||
theory WP
|
||||
imports
|
||||
WP_Pre
|
||||
|
|
|
@ -242,11 +242,13 @@ lemma demo2:
|
|||
\<and> (\<forall>x. I x \<longrightarrow> (case x of None \<Rightarrow> R (Inl 8) | Some y \<Rightarrow> R (Inr y)))
|
||||
\<and> (\<forall>x. I x \<longrightarrow> (case x of None \<Rightarrow> R (Inr 9) | Some y \<Rightarrow> R (Inl (y - 1))))"
|
||||
apply (intro exI conjI[rotated] allI)
|
||||
apply (rename_tac x)
|
||||
apply (case_tac x; simp)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
apply (rename_tac x)
|
||||
apply (case_tac x; simp)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
|
|
|
@ -43,7 +43,7 @@ lemma bool_function_four_cases:
|
|||
by (auto simp add: fun_eq_iff all_bool_eq)
|
||||
|
||||
|
||||
text \<open>The ML version of repeat_new is slightly faster than the Eisbach one.\<close>
|
||||
text \<open>The ML version of @{text repeat_new} is slightly faster than the Eisbach one.\<close>
|
||||
|
||||
method_setup repeat_new =
|
||||
\<open>Method.text_closure >> (fn m => fn ctxt => fn facts =>
|
||||
|
@ -116,6 +116,7 @@ private lemma
|
|||
(atomic f (\<lambda>s. A' s \<and> Pres' s) (\<lambda>r s. A r s \<and> Pres r s) B Q' \<Longrightarrow> trip Ts) \<Longrightarrow> trip Ts"
|
||||
apply (erule meta_mp)
|
||||
apply (clarsimp simp: valid_def atomic_def)
|
||||
apply (rename_tac P s r s')
|
||||
apply (drule_tac x=P in spec)
|
||||
apply (drule_tac x=P in meta_spec)
|
||||
apply (drule_tac x=s in spec)+
|
||||
|
@ -142,7 +143,7 @@ private lemma
|
|||
by (simp add: atomic_def )
|
||||
|
||||
text \<open>Decomposing a static term is a waste of time as we know we can lift it
|
||||
out all in one go. Additionally this stops wp_drop_imp from uselessly taking it apart.\<close>
|
||||
out all in one go. Additionally this stops @{text wp_drop_imp} from uselessly taking it apart.\<close>
|
||||
|
||||
|
||||
private definition "static Q = (\<lambda>r s. Q)"
|
||||
|
@ -186,7 +187,7 @@ private lemma
|
|||
private lemma trips_True: "trip True" by (simp add: trip_def)
|
||||
|
||||
|
||||
text \<open>We need to push the quantifiers into the hoare triples.
|
||||
text \<open>We need to push the quantifiers into the Hoare triples.
|
||||
This is an unfortunate bit of manual work, but anything more than 2 levels
|
||||
of nesting is unlikely.\<close>
|
||||
|
||||
|
@ -209,7 +210,7 @@ text \<open>Existentials are hard, and we don't see them often
|
|||
we fail to process the triple and it won't be lifted.
|
||||
|
||||
Some more work here to allow the heuristics to drop any added implications
|
||||
if they're deemed unecessary.\<close>
|
||||
if they're deemed unnecessary.\<close>
|
||||
|
||||
private lemma trips_push_ex1:
|
||||
"trip (\<exists>x. \<lbrace>\<lambda>s. Q s\<rbrace> f \<lbrace>\<lambda>r s. Q' x r s\<rbrace>) \<Longrightarrow>
|
||||
|
@ -269,7 +270,7 @@ private method uses_arg for C :: "'a \<Rightarrow> 'b \<Rightarrow> bool" =
|
|||
determ \<open>(match (C) in "\<lambda>r s. ?discard_r s" (cut) \<Rightarrow> \<open>fail\<close> \<bar> _ \<Rightarrow> \<open>-\<close>)\<close>
|
||||
|
||||
text \<open>Here the "test" constant holds information about the logical context of the atomic postcondition
|
||||
in the original hoare triple. "f" is the function with its arguments, "C" is all the collected
|
||||
in the original Hoare triple. "f" is the function with its arguments, "C" is all the collected
|
||||
premises and "Q" is the atomic postcondition that we want to solve in isolation.
|
||||
|
||||
The method succeeds if the atomic postcondition seems to not depend on its context, i.e.
|
||||
|
@ -305,7 +306,7 @@ private method make_goals methods wp_weak wp_strong tests =
|
|||
|
||||
text \<open>Once all the triples exist we simplify them all in one go to
|
||||
find trivial or self-contradictory rules. This avoids invoking the simplifier
|
||||
once per postcondition. imp_conjL is used to curry our generated implications.
|
||||
once per postcondition. @{thm imp_conjL} is used to curry our generated implications.
|
||||
|
||||
If all the postconditions together are contradictory, the simplifier won't use it to strengthen
|
||||
the postcondition. As an optimization we simply bail out in that case, rather than
|
||||
|
@ -324,7 +325,7 @@ method post_strengthen methods wp_weak wp_strong simp' tests =
|
|||
rule trip_drop,
|
||||
(rule hoare_vcg_prop)?)
|
||||
|
||||
text \<open>The "wpi" named theorem is used to avoid the safety heuristics, effectively
|
||||
text \<open>The @{text wpi} named theorem is used to avoid the safety heuristics, effectively
|
||||
saying that the presence of that postcondition indicates that it should always be lifted.\<close>
|
||||
|
||||
named_theorems wpi
|
||||
|
|
|
@ -12,6 +12,8 @@ imports
|
|||
"HOL-Eisbach.Eisbach_Tools"
|
||||
begin
|
||||
|
||||
section \<open>Creating Schematic Preconditions\<close>
|
||||
|
||||
ML \<open>
|
||||
structure WP_Pre = struct
|
||||
|
||||
|
|
1
lib/ROOT
1
lib/ROOT
|
@ -67,6 +67,7 @@ session Lib (lib) = Word_Lib +
|
|||
Value_Type
|
||||
Named_Eta
|
||||
Rules_Tac
|
||||
Heap_List
|
||||
|
||||
(* should move to Monads: *)
|
||||
NonDetMonadLemmaBucket
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
theory CCorresLemmas
|
||||
imports CCorres_Rewrite
|
||||
imports CCorres_Rewrite MonadicRewrite_C
|
||||
begin
|
||||
|
||||
lemma ccorres_rel_imp2:
|
||||
|
@ -1040,119 +1040,208 @@ lemma ccorres_Guard_True_Seq:
|
|||
\<Longrightarrow> ccorres_underlying sr \<Gamma> r xf arrel axf A C hs a (Guard F \<lbrace>True\<rbrace> c ;; d)"
|
||||
by (simp, ccorres_rewrite, assumption)
|
||||
|
||||
lemma exec_While_final_inv'':
|
||||
"\<lbrakk> \<Gamma> \<turnstile> \<langle>b, x\<rangle> \<Rightarrow> s'; b = While C B; x = Normal s;
|
||||
\<And>s. s \<notin> C \<Longrightarrow> I s (Normal s);
|
||||
\<And>t t' t''. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Normal t'; \<Gamma>\<turnstile> \<langle>While C B, Normal t'\<rangle> \<Rightarrow> t'';
|
||||
I t' t'' \<rbrakk> \<Longrightarrow> I t t'';
|
||||
\<And>t t'. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Abrupt t' \<rbrakk> \<Longrightarrow> I t (Abrupt t');
|
||||
\<And>t. \<lbrakk> t \<in> C; \<Gamma> \<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Stuck \<rbrakk> \<Longrightarrow> I t Stuck;
|
||||
\<And>t f. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Fault f \<rbrakk> \<Longrightarrow> I t (Fault f) \<rbrakk>
|
||||
\<Longrightarrow> I s s'"
|
||||
apply (induct arbitrary: s rule: exec.induct; simp)
|
||||
apply (erule exec_elim_cases; fastforce simp: exec.WhileTrue exec.WhileFalse)
|
||||
lemma ccorres_While_Normal_helper:
|
||||
assumes setter_inv:
|
||||
"\<Gamma> \<turnstile> {s'. \<exists>rv s. G rv s s'} setter {s'. \<exists>rv s. G rv s s' \<and> (cond_xf s' \<noteq> 0 \<longrightarrow> Cnd rv s s')}"
|
||||
assumes body_inv: "\<Gamma> \<turnstile> {s'. \<exists>rv s. G rv s s' \<and> Cnd rv s s'} B {s'. \<exists>rv s. G rv s s'}"
|
||||
shows "\<Gamma> \<turnstile> ({s'. \<exists>rv s. G rv s s' \<and> (cond_xf s' \<noteq> 0 \<longrightarrow> Cnd rv s s')})
|
||||
While {s'. cond_xf s' \<noteq> 0} (Seq B setter)
|
||||
{s'. \<exists>rv s. G rv s s'}"
|
||||
apply (insert assms)
|
||||
apply (rule hoare_complete)
|
||||
apply (simp add: cvalid_def HoarePartialDef.valid_def)
|
||||
apply (intro allI)
|
||||
apply (rename_tac xstate xstate')
|
||||
apply (rule impI)
|
||||
apply (case_tac "\<not> isNormal xstate")
|
||||
apply fastforce
|
||||
apply (simp add: isNormal_def)
|
||||
apply (elim exE)
|
||||
apply (simp add: image_def)
|
||||
apply (erule exec_While_final_inv''[where C="{s'. cond_xf s' \<noteq> 0}" and B="B;; setter"]; clarsimp)
|
||||
apply (frule intermediate_Normal_state[OF _ _ body_inv])
|
||||
apply fastforce
|
||||
apply clarsimp
|
||||
apply (rename_tac inter_t)
|
||||
apply (frule hoarep_exec[OF _ _ body_inv, rotated], fastforce)
|
||||
apply (frule_tac s=inter_t in hoarep_exec[rotated 2], fastforce+)[1]
|
||||
apply (metis (mono_tags, lifting) HoarePartial.SeqSwap empty_iff exec_abrupt mem_Collect_eq)
|
||||
apply (metis (mono_tags, lifting) HoarePartial.SeqSwap exec_stuck mem_Collect_eq)
|
||||
apply (metis (mono_tags, lifting) HoarePartial.SeqSwap empty_iff exec_fault mem_Collect_eq)
|
||||
done
|
||||
|
||||
lemma intermediate_Normal_state:
|
||||
"\<lbrakk>\<Gamma> \<turnstile> \<langle>Seq c\<^sub>1 c\<^sub>2, Normal t\<rangle> \<Rightarrow> t''; t \<in> P; \<Gamma> \<turnstile> P c\<^sub>1 Q\<rbrakk>
|
||||
\<Longrightarrow> \<exists>t'. \<Gamma> \<turnstile> \<langle>c\<^sub>1, Normal t\<rangle> \<Rightarrow> Normal t' \<and> \<Gamma> \<turnstile> \<langle>c\<^sub>2, Normal t'\<rangle> \<Rightarrow> t''"
|
||||
apply (erule exec_Normal_elim_cases(8))
|
||||
apply (insert hoarep_exec)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma While_inv_from_body:
|
||||
"\<Gamma> \<turnstile> (G \<inter> C) B G \<Longrightarrow> \<Gamma> \<turnstile> G While C B G"
|
||||
apply (drule hoare_sound)+
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
by (erule exec_While_final_inv''[where I="\<lambda>s s'. s \<in> G \<longrightarrow> s' \<in> Normal ` G", THEN impE],
|
||||
fastforce+)
|
||||
|
||||
lemma While_inv_from_body_setter:
|
||||
"\<lbrakk>\<Gamma> \<turnstile> G setter (G \<inter> {s. s \<in> C \<longrightarrow> s \<in> Cnd}); \<Gamma> \<turnstile> (G \<inter> Cnd) B G\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma> \<turnstile> (G \<inter> {s. s \<in> C \<longrightarrow> s \<in> Cnd}) While C (Seq B setter) G"
|
||||
apply (drule hoare_sound)+
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
by (rule exec_While_final_inv''
|
||||
[where I="\<lambda>s s'. s \<in> G \<and> (s \<in> C \<longrightarrow> s \<in> Cnd) \<longrightarrow> s' \<in> Normal ` G", THEN impE],
|
||||
(fastforce dest!: intermediate_Normal_state[where c\<^sub>1=B and P="G \<inter> Cnd" and Q=G]
|
||||
intro: hoare_complete
|
||||
simp: cvalid_def HoarePartialDef.valid_def)+)
|
||||
|
||||
lemmas hoarep_Int_pre_fix = hoarep_Int[where P=P and P'=P for P, simplified]
|
||||
|
||||
text \<open>
|
||||
This rule is intended to be used to show correspondence between a Haskell whileLoop and a C
|
||||
while loop, where in particular, the C while loop is parsed into a Simpl While loop that
|
||||
updates the cstate as part of the loop condition. In such a case, the CParser will produce a Simpl
|
||||
program in the form that is seen in the conclusion of this rule.\<close>
|
||||
lemma ccorres_While:
|
||||
assumes body_ccorres:
|
||||
"\<And>r. ccorresG srel \<Gamma> (=) xf (G and (\<lambda>s. the (C r s))) (G' \<inter> C') hs (B r) B'"
|
||||
and cond_ccorres:
|
||||
"\<And>r. ccorresG srel \<Gamma> (\<lambda>rv rv'. rv = to_bool rv') cond_xf G G' hs (gets_the (C r)) setter"
|
||||
and nf: "\<And>r. no_fail (G and (\<lambda>s. the (C r s))) (B r)"
|
||||
and no_ofail: "\<And>r. no_ofail G (C r)"
|
||||
and body_inv: "\<And>r. \<lbrace>G and (\<lambda>s. the (C r s))\<rbrace> B r \<lbrace>\<lambda>_. G\<rbrace>"
|
||||
"\<Gamma> \<turnstile> (G' \<inter> C') B' G'"
|
||||
and setter_inv_cond: "\<Gamma> \<turnstile> G' setter (G' \<inter> {s'. cond_xf s' \<noteq> 0 \<longrightarrow> s' \<in> C'})"
|
||||
and setter_xf_inv: "\<And>val. \<Gamma> \<turnstile> {s'. xf s' = val} setter {s'. xf s' = val}"
|
||||
shows "ccorresG srel \<Gamma> (=) xf G (G' \<inter> {s'. xf s' = r}) hs
|
||||
(whileLoop (\<lambda>r s. the (C r s)) B r)
|
||||
(Seq setter (While {s'. cond_xf s' \<noteq> 0} (Seq B' setter)))"
|
||||
"\<And>r. ccorresG srel \<Gamma> rrel xf
|
||||
(\<lambda>s. G r s \<and> C r s = Some True) (G' \<inter> C' \<inter> {s'. rrel r (xf s')}) []
|
||||
(B r) B'"
|
||||
assumes cond_ccorres:
|
||||
"\<And>r. ccorresG srel \<Gamma> (\<lambda>rv rv'. rv = to_bool rv') cond_xf
|
||||
(G r) (G' \<inter> {s'. rrel r (xf s')}) []
|
||||
(gets_the (C r)) cond"
|
||||
assumes nf: "\<And>r. no_fail (\<lambda>s. G r s \<and> C r s = Some True) (B r)"
|
||||
assumes no_ofail: "\<And>r. no_ofail (G r) (C r)"
|
||||
assumes body_inv:
|
||||
"\<And>r. \<lbrace>\<lambda>s. G r s \<and> C r s = Some True\<rbrace> B r \<lbrace>G\<rbrace>"
|
||||
"\<And>r s. \<Gamma> \<turnstile> {s'. s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')
|
||||
\<and> s' \<in> C' \<and> C r s = Some True}
|
||||
B' G'"
|
||||
assumes cond_hoarep:
|
||||
"\<And>r s. \<Gamma> \<turnstile> {s'. s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')}
|
||||
cond
|
||||
{s'. s' \<in> G' \<and> (cond_xf s' \<noteq> 0 \<longrightarrow> s' \<in> C') \<and> rrel r (xf s')}"
|
||||
shows
|
||||
"ccorresG srel \<Gamma> rrel xf (G r) (G' \<inter> {s'. rrel r (xf s')}) hs
|
||||
(whileLoop (\<lambda>r s. the (C r s)) B r)
|
||||
(cond;; While {s'. cond_xf s' \<noteq> 0} (B';; cond))"
|
||||
proof -
|
||||
note unif_rrel_def[simp add] to_bool_def[simp add]
|
||||
have helper:
|
||||
"\<And>state xstate'.
|
||||
\<Gamma> \<turnstile> \<langle>While {s'. cond_xf s' \<noteq> 0} (Seq B' setter), Normal state\<rangle> \<Rightarrow> xstate' \<Longrightarrow>
|
||||
\<Gamma> \<turnstile> \<langle>While {s'. cond_xf s' \<noteq> 0} (Seq B' cond), Normal state\<rangle> \<Rightarrow> xstate' \<Longrightarrow>
|
||||
\<forall>st r s. Normal st = xstate' \<and> (s, state) \<in> srel
|
||||
\<and> (cond_xf state \<noteq> 0) = the (C r s) \<and> xf state = r \<and> G s
|
||||
\<and> state \<in> G' \<and> (cond_xf state \<noteq> 0 \<longrightarrow> state \<in> C')
|
||||
\<and> (C r s \<noteq> None) \<and> (cond_xf state \<noteq> 0) = the (C r s)
|
||||
\<and> rrel r (xf state) \<and> G r s \<and> state \<in> G' \<and> (cond_xf state \<noteq> 0 \<longrightarrow> state \<in> C')
|
||||
\<longrightarrow> (\<exists>rv s'. (rv, s') \<in> fst (whileLoop (\<lambda>r s. the (C r s)) B r s)
|
||||
\<and> (s', st) \<in> srel \<and> rv = xf st)"
|
||||
apply (erule exec_While_final_inv''; simp)
|
||||
\<and> (s', st) \<in> srel \<and> rrel rv (xf st))"
|
||||
apply (erule_tac C="{s'. cond_xf s' \<noteq> 0}" in exec_While_final_inv''; simp)
|
||||
apply (fastforce simp: whileLoop_cond_fail return_def)
|
||||
apply clarsimp
|
||||
apply (rename_tac t t' t'' s)
|
||||
apply (frule intermediate_Normal_state[where P="G' \<inter> C'"])
|
||||
apply (rename_tac t t' t'' r s y)
|
||||
apply (frule_tac P="{s'. s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')
|
||||
\<and> s' \<in> C' \<and> (C r s \<noteq> None) \<and> the (C r s)}"
|
||||
in intermediate_Normal_state)
|
||||
apply fastforce
|
||||
apply (fastforce intro: body_inv)
|
||||
apply (fastforce intro: body_inv conseqPre)
|
||||
apply clarsimp
|
||||
apply (rename_tac inter_t)
|
||||
apply (prop_tac "\<exists>s'. (xf inter_t, s') \<in> fst (B (xf t) s) \<and> (s', inter_t) \<in> srel")
|
||||
subgoal by (erule ccorresE[OF body_ccorres])
|
||||
(fastforce simp: no_fail_def nf[simplified no_fail_def] dest: EHOther)+
|
||||
apply (prop_tac "\<exists>rv' s'. rrel rv' (xf inter_t) \<and> (rv', s') \<in> fst (B r s)
|
||||
\<and> (s', inter_t) \<in> srel")
|
||||
apply (insert body_ccorres)[1]
|
||||
apply (drule_tac x=r in meta_spec)
|
||||
apply (erule (1) ccorresE)
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
using nf apply (fastforce simp: no_fail_def)
|
||||
apply (fastforce dest!: EHOther)
|
||||
apply fastforce
|
||||
apply clarsimp
|
||||
apply (prop_tac "G s'")
|
||||
apply (prop_tac "G rv' s'")
|
||||
apply (fastforce dest: use_valid intro: body_inv)
|
||||
apply (prop_tac "inter_t \<in> G'")
|
||||
apply (fastforce dest: hoarep_exec[rotated] intro: body_inv)
|
||||
apply (drule_tac x=rv' in spec)
|
||||
apply (drule_tac x=s' in spec)
|
||||
apply (prop_tac "rrel rv' (xf inter_t)")
|
||||
apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated])
|
||||
apply (elim impE)
|
||||
apply (drule_tac s'=inter_t and r1="xf t'" in ccorresE_gets_the[OF cond_ccorres]; assumption?)
|
||||
apply (frule_tac s'=inter_t and r1=rv' in ccorresE_gets_the[OF cond_ccorres]; assumption?)
|
||||
apply fastforce
|
||||
apply (fastforce intro: no_ofail)
|
||||
apply (fastforce dest: EHOther)
|
||||
subgoal by (fastforce dest: hoarep_exec intro: setter_inv_cond)
|
||||
apply (prop_tac "xf inter_t = xf t'")
|
||||
apply (fastforce dest: hoarep_exec[rotated] intro: setter_xf_inv)
|
||||
apply (intro conjI)
|
||||
apply fastforce
|
||||
using no_ofail apply (fastforce elim!: no_ofailD)
|
||||
apply fastforce
|
||||
apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated])
|
||||
apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated])
|
||||
apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated])
|
||||
apply (fastforce dest: hoarep_exec[OF _ _ cond_hoarep, rotated])
|
||||
apply (fastforce simp: whileLoop_def intro: whileLoop_results.intros(3))
|
||||
done
|
||||
|
||||
have cond_hoarep':
|
||||
"\<And>r s s' n xstate.
|
||||
\<Gamma>\<turnstile>\<^sub>h \<langle>(cond;; While {s'. cond_xf s' \<noteq> 0} (B';; cond)) # hs,s'\<rangle> \<Rightarrow> (n, xstate)
|
||||
\<Longrightarrow> \<Gamma>\<turnstile> {s' \<in> G'. (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')}
|
||||
cond
|
||||
{s'. (s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s'))
|
||||
\<and> (cond_xf s' \<noteq> 0 \<longrightarrow> (s' \<in> C' \<and> C r s = Some True))}"
|
||||
apply (insert cond_ccorres)
|
||||
apply (drule_tac x=r in meta_spec)
|
||||
apply (frule_tac s=s in ccorres_to_vcg_gets_the)
|
||||
apply (fastforce intro: no_ofail)
|
||||
apply (insert cond_hoarep)
|
||||
apply (drule_tac x=s in meta_spec)
|
||||
apply (drule_tac x=r in meta_spec)
|
||||
apply (rule hoarep_conj_lift_pre_fix)
|
||||
apply (rule hoarep_conj_lift_pre_fix)
|
||||
apply (insert cond_hoarep)[1]
|
||||
apply (fastforce simp: conseq_under_new_pre)
|
||||
apply (fastforce intro!: hoarep_conj_lift_pre_fix simp: Collect_mono conseq_under_new_pre)
|
||||
apply (insert cond_hoarep)
|
||||
apply (drule_tac x=s in meta_spec)
|
||||
apply (drule_tac x=r in meta_spec)
|
||||
apply (simp add: imp_conjR)
|
||||
apply (rule hoarep_conj_lift_pre_fix)
|
||||
apply (simp add: Collect_mono conseq_under_new_pre)
|
||||
apply (rule_tac Q'="{s'. C r s \<noteq> None \<and> the (C r s) = (cond_xf s' \<noteq> 0)}"
|
||||
in conseqPost[rotated])
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
apply (simp add: Collect_mono conseq_under_new_pre)
|
||||
done
|
||||
|
||||
have cond_inv_guard:
|
||||
"\<And>r s. \<Gamma> \<turnstile> {s'. s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')}
|
||||
cond
|
||||
{s'. s' \<in> G' \<and> (cond_xf s' \<noteq> 0 \<longrightarrow> s' \<in> C')}"
|
||||
by (fastforce intro: conseqPost cond_hoarep)
|
||||
|
||||
show ?thesis
|
||||
apply (clarsimp simp: ccorres_underlying_def)
|
||||
apply (rename_tac s s' n xstate)
|
||||
apply (frule (1) exec_handlers_use_hoare_nothrow_hoarep)
|
||||
apply (rule_tac R="G' \<inter> {s'. s' \<in> {t. cond_xf t \<noteq> 0} \<longrightarrow> s' \<in> C'}" in HoarePartial.Seq)
|
||||
apply (fastforce intro: setter_inv_cond)
|
||||
apply (fastforce intro: While_inv_from_body_setter setter_inv_cond body_inv)
|
||||
apply clarsimp
|
||||
apply (frule (1) intermediate_Normal_state)
|
||||
apply (fastforce intro: setter_inv_cond)
|
||||
apply (frule_tac R'="{s'. s' \<in> G' \<and> (s, s') \<in> srel \<and> G r s \<and> rrel r (xf s')}"
|
||||
and Q'="{s'. \<exists>rv s. s' \<in> G' \<and> (s, s') \<in> srel \<and> G rv s \<and> rrel rv (xf s')}"
|
||||
in exec_handlers_use_hoare_nothrow_hoarep)
|
||||
apply fastforce
|
||||
apply (rule HoarePartial.Seq)
|
||||
apply (erule cond_hoarep')
|
||||
apply (rule conseqPre)
|
||||
apply (rule ccorres_While_Normal_helper)
|
||||
apply (fastforce intro!: cond_hoarep' hoarep_ex_lift)
|
||||
apply (intro hoarep_ex_pre, rename_tac rv new_s)
|
||||
apply (insert cond_inv_guard)[1]
|
||||
apply (drule_tac x=new_s in meta_spec)
|
||||
apply (drule_tac x=rv in meta_spec)
|
||||
apply (insert body_ccorres)[1]
|
||||
apply (drule_tac x=rv in meta_spec)
|
||||
apply (insert body_inv(2))[1]
|
||||
apply (drule_tac x=new_s in meta_spec)
|
||||
apply (drule_tac x=rv in meta_spec)
|
||||
apply (frule_tac s=new_s in ccorres_to_vcg_with_prop)
|
||||
using nf apply fastforce
|
||||
using body_inv apply fastforce
|
||||
apply (rule_tac Q'="{s'. s' \<in> G'
|
||||
\<and> (\<exists>(rv, s) \<in>fst (B rv new_s). (s, s') \<in> srel \<and> rrel rv (xf s')
|
||||
\<and> G rv s)}"
|
||||
in conseqPost;
|
||||
fastforce?)
|
||||
apply (rule hoarep_conj_lift_pre_fix;
|
||||
fastforce simp: Collect_mono conseq_under_new_pre)
|
||||
apply fastforce
|
||||
apply (case_tac xstate; clarsimp)
|
||||
apply (frule intermediate_Normal_state[OF _ _ cond_hoarep]; assumption?)
|
||||
apply fastforce
|
||||
apply clarsimp
|
||||
apply (rename_tac inter_t)
|
||||
apply (drule (2) ccorresE_gets_the[OF cond_ccorres _ _ _ no_ofail])
|
||||
apply (insert cond_ccorres)
|
||||
apply (drule_tac x=r in meta_spec)
|
||||
apply (drule (2) ccorresE_gets_the)
|
||||
apply fastforce
|
||||
apply (fastforce intro: no_ofail)
|
||||
apply (fastforce dest: EHOther)
|
||||
apply (prop_tac "xf inter_t = xf s'")
|
||||
apply (fastforce dest: hoarep_exec[rotated] intro: setter_xf_inv)
|
||||
apply (clarsimp simp: isNormal_def)
|
||||
apply (auto dest: hoarep_exec dest!: helper spec intro: setter_inv_cond)
|
||||
apply (prop_tac "rrel r (xf inter_t)")
|
||||
apply (fastforce dest: hoarep_exec[rotated] intro: cond_hoarep)
|
||||
apply (case_tac "\<not> the (C r s)")
|
||||
apply (fastforce elim: exec_Normal_elim_cases simp: whileLoop_cond_fail return_def)
|
||||
apply (insert no_ofail)
|
||||
apply (fastforce dest!: helper hoarep_exec[OF _ _ cond_inv_guard, rotated] no_ofailD)
|
||||
done
|
||||
qed
|
||||
|
||||
|
@ -1177,4 +1266,38 @@ lemma ccorres_inr_rrel_Inr[simp]:
|
|||
= ccorres_underlying srel \<Gamma> r xf ar axf P Q hs m c"
|
||||
by (simp cong: ccorres_context_cong)+
|
||||
|
||||
lemma add_remove_return:
|
||||
"getter >>= setter = do (do val \<leftarrow> getter; setter val; return val od); return () od"
|
||||
by (simp add: bind_assoc)
|
||||
|
||||
lemma ccorres_call_getter_setter_dc:
|
||||
assumes cul: "ccorresG sr \<Gamma> r' xf' P (i ` P') [] getter (Call f)"
|
||||
and gsr: "\<And>x x' s t rv.
|
||||
\<lbrakk> (x, t) \<in> sr; r' rv (xf' t); ((), x') \<in> fst (setter rv x) \<rbrakk>
|
||||
\<Longrightarrow> (x', g s t (clean s t)) \<in> sr"
|
||||
and ist: "\<And>x s. (x, s) \<in> sr \<Longrightarrow> (x, i s) \<in> sr"
|
||||
and ef: "\<And>val. empty_fail (setter val)"
|
||||
shows "ccorresG sr \<Gamma> dc xfdc P P' hs
|
||||
(getter >>= setter)
|
||||
(call i f clean (\<lambda>s t. Basic (g s t)))"
|
||||
apply (rule ccorres_guard_imp)
|
||||
apply (rule monadic_rewrite_ccorres_assemble[rotated])
|
||||
apply (rule monadic_rewrite_is_refl)
|
||||
apply (rule add_remove_return)
|
||||
apply (rule ccorres_seq_skip'[THEN iffD1])
|
||||
apply (rule ccorres_split_nothrow_novcg)
|
||||
apply (rule ccorres_call_getter_setter)
|
||||
apply (fastforce intro: cul)
|
||||
apply (fastforce intro: gsr)
|
||||
apply (simp add: gsr)
|
||||
apply (fastforce intro: ist)
|
||||
apply (fastforce intro: ef)
|
||||
apply (rule ceqv_refl)
|
||||
apply (fastforce intro: ccorres_return_Skip)
|
||||
apply wpsimp
|
||||
apply (clarsimp simp: guard_is_UNIV_def)
|
||||
apply wpsimp
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
end
|
||||
|
|
|
@ -288,26 +288,37 @@ lemma ccorres_from_vcg_nofail:
|
|||
apply (rule hoare_complete, simp add: HoarePartialDef.valid_def)
|
||||
done
|
||||
|
||||
|
||||
lemma ccorres_to_vcg:
|
||||
"ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c \<Longrightarrow>
|
||||
(\<forall>\<sigma>. \<not> snd (a \<sigma>) \<longrightarrow> \<Gamma> \<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel}
|
||||
c
|
||||
{s. (\<exists>(rv, \<sigma>') \<in> fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s))})"
|
||||
apply -
|
||||
apply rule
|
||||
apply (rule impI)
|
||||
lemma ccorres_to_vcg_with_prop:
|
||||
"\<lbrakk>ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c; no_fail Q a; \<lbrace>P\<rbrace> a \<lbrace>R\<rbrace>\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma> \<turnstile> {s'. P s \<and> Q s \<and> s' \<in> P' \<and> (s, s') \<in> srel}
|
||||
c {t'. \<exists>(rv, t) \<in> fst (a s). (t, t') \<in> srel \<and> rrel rv (xf t') \<and> R rv t}"
|
||||
apply (rule hoare_complete)
|
||||
apply (simp add: HoarePartialDef.valid_def cvalid_def)
|
||||
apply (intro impI allI)
|
||||
apply (clarsimp simp: HoarePartialDef.valid_def cvalid_def no_fail_def)
|
||||
apply (drule_tac x=s in spec)
|
||||
apply clarsimp
|
||||
apply (frule (5) ccorres_empty_handler_abrupt)
|
||||
apply (erule (4) ccorresE)
|
||||
apply (erule (1) EHOther)
|
||||
apply clarsimp
|
||||
apply rule
|
||||
apply simp
|
||||
apply (fastforce simp: unif_rrel_simps)
|
||||
apply (fastforce elim!: ccorresE EHOther simp: unif_rrel_simps valid_def)
|
||||
done
|
||||
|
||||
lemma ccorres_to_vcg:
|
||||
"\<lbrakk>ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c; no_fail Q a\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma> \<turnstile> {s'. P s \<and> Q s \<and> s' \<in> P' \<and> (s, s') \<in> srel}
|
||||
c {t'. \<exists>(rv, t) \<in> fst (a s). (t, t') \<in> srel \<and> rrel rv (xf t')}"
|
||||
apply (frule (1) ccorres_to_vcg_with_prop[where R="\<top>\<top>"])
|
||||
apply wpsimp
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma ccorres_to_vcg_gets_the:
|
||||
"\<lbrakk>ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] (gets_the r) c; no_ofail P r\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma> \<turnstile> (P' \<inter> {s'. (s, s') \<in> srel \<and> P s})
|
||||
c {t'. (s, t') \<in> srel \<and> P s \<and> (r s \<noteq> None) \<and> rrel (the (r s)) (xf t')}"
|
||||
apply (frule ccorres_to_vcg_with_prop[where R="\<lambda>_. P" and s=s])
|
||||
apply (erule no_ofail_gets_the)
|
||||
apply wpsimp
|
||||
apply (clarsimp simp: gets_the_def simpler_gets_def bind_def return_def get_def assert_opt_def
|
||||
fail_def conseq_under_new_pre
|
||||
split: option.splits)
|
||||
done
|
||||
|
||||
lemma exec_handlers_Seq_cases0':
|
||||
|
@ -830,14 +841,14 @@ text \<open>
|
|||
between the values set on the concrete and abstract side. The @{thm bind_assoc_return_reverse} rule
|
||||
may assist with rewriting statements to add the extra return needed by this rule\<close>
|
||||
lemma ccorres_call_getter_setter:
|
||||
assumes cul: "ccorresG sr \<Gamma> (=) xf' P (i ` P') [] getter (Call f)"
|
||||
assumes cul: "ccorresG sr \<Gamma> r' xf' P (i ` P') [] getter (Call f)"
|
||||
and gsr: "\<And>x x' s t rv rv'.
|
||||
\<lbrakk> (x, t) \<in> sr; rv = xf' t; (rv', x') \<in> fst (setter rv x) \<rbrakk>
|
||||
\<lbrakk> (x, t) \<in> sr; r' rv (xf' t); (rv', x') \<in> fst (setter rv x) \<rbrakk>
|
||||
\<Longrightarrow> (x', g s t (clean s t)) \<in> sr"
|
||||
and res: "\<And>s t rv. rv = xf' t \<Longrightarrow> rv = xf (g s t (clean s t))"
|
||||
and res: "\<And>s t rv. r' rv (xf' t) \<Longrightarrow> r rv (xf (g s t (clean s t)))"
|
||||
and ist: "\<And>x s. (x, s) \<in> sr \<Longrightarrow> (x, i s) \<in> sr"
|
||||
and ef: "\<And>val. empty_fail (setter val)"
|
||||
shows "ccorresG sr \<Gamma> (=) xf P P' hs
|
||||
shows "ccorresG sr \<Gamma> r xf P P' hs
|
||||
(do val \<leftarrow> getter; setter val; return val od)
|
||||
(call i f clean (\<lambda>s t. Basic (g s t)))"
|
||||
apply (rule ccorresI')
|
||||
|
@ -1402,14 +1413,6 @@ lemma ccorres_move_Guard:
|
|||
|
||||
section "novcg"
|
||||
|
||||
lemma ccorres_to_vcg':
|
||||
"\<lbrakk> ccorres_underlying srel \<Gamma> rrel xf arrel axf P P' [] a c; \<not> snd (a \<sigma>) \<rbrakk> \<Longrightarrow>
|
||||
\<Gamma>\<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> srel} c
|
||||
{s. \<exists>(rv, \<sigma>')\<in>fst (a \<sigma>). (\<sigma>', s) \<in> srel \<and> rrel rv (xf s)}"
|
||||
apply (drule ccorres_to_vcg)
|
||||
apply clarsimp
|
||||
done
|
||||
|
||||
lemma exec_handlers_Hoare_UNIV:
|
||||
"guard_is_UNIV r xf Q \<Longrightarrow>
|
||||
exec_handlers_Hoare \<Gamma> UNIV cs (ccHoarePost r xf Q) UNIV"
|
||||
|
|
|
@ -72,6 +72,7 @@ lemma hoarep_Int:
|
|||
apply fastforce
|
||||
done
|
||||
|
||||
lemmas hoarep_Int_pre_fix = hoarep_Int[where P=P and P'=P for P, simplified]
|
||||
|
||||
lemma Normal_result:
|
||||
"\<Gamma> \<turnstile> \<langle>c, s\<rangle> \<Rightarrow> Normal t' \<Longrightarrow> \<exists>t. s = Normal t"
|
||||
|
@ -350,5 +351,62 @@ lemma hoarep_revert:
|
|||
apply simp
|
||||
done
|
||||
|
||||
lemma intermediate_Normal_state:
|
||||
"\<lbrakk>\<Gamma> \<turnstile> \<langle>Seq c\<^sub>1 c\<^sub>2, Normal t\<rangle> \<Rightarrow> t''; t \<in> P; \<Gamma> \<turnstile> P c\<^sub>1 Q\<rbrakk>
|
||||
\<Longrightarrow> \<exists>t'. \<Gamma> \<turnstile> \<langle>c\<^sub>1, Normal t\<rangle> \<Rightarrow> Normal t' \<and> \<Gamma> \<turnstile> \<langle>c\<^sub>2, Normal t'\<rangle> \<Rightarrow> t''"
|
||||
apply (erule exec_Normal_elim_cases(8))
|
||||
apply (insert hoarep_exec)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma hoarep_ex_pre:
|
||||
"(\<And>x. \<Gamma> \<turnstile> {s. P x s} c Q) \<Longrightarrow> \<Gamma> \<turnstile> {s. \<exists>x. P x s} c Q"
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
apply (fastforce dest: hoarep_exec'[rotated])
|
||||
done
|
||||
|
||||
lemma hoarep_ex_lift:
|
||||
"(\<And>x. \<Gamma> \<turnstile> {s. P x s} c {s. Q x s}) \<Longrightarrow> \<Gamma> \<turnstile> {s. \<exists>x. P x s} c {s. \<exists>x. Q x s}"
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
apply (rename_tac s x)
|
||||
apply (drule_tac x=x in meta_spec)
|
||||
apply (prop_tac "s \<in> Collect (P x)")
|
||||
apply fastforce
|
||||
apply (frule (2) hoarep_exec)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma hoarep_conj_lift_pre_fix:
|
||||
"\<lbrakk>\<Gamma> \<turnstile> P c {s. Q s}; \<Gamma> \<turnstile> P c {s. Q' s}\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma> \<turnstile> P c {s. Q s \<and> Q' s}"
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
apply (frule (2) hoarep_exec[where Q="Collect Q"])
|
||||
apply (frule (2) hoarep_exec[where Q="Collect Q'"])
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma exec_While_final_inv'':
|
||||
"\<lbrakk> \<Gamma> \<turnstile> \<langle>b, x\<rangle> \<Rightarrow> s'; b = While C B; x = Normal s;
|
||||
\<And>s. s \<notin> C \<Longrightarrow> I s (Normal s);
|
||||
\<And>t t' t''. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Normal t'; \<Gamma>\<turnstile> \<langle>While C B, Normal t'\<rangle> \<Rightarrow> t'';
|
||||
I t' t'' \<rbrakk> \<Longrightarrow> I t t'';
|
||||
\<And>t t'. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Abrupt t' \<rbrakk> \<Longrightarrow> I t (Abrupt t');
|
||||
\<And>t. \<lbrakk> t \<in> C; \<Gamma> \<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Stuck \<rbrakk> \<Longrightarrow> I t Stuck;
|
||||
\<And>t f. \<lbrakk> t \<in> C; \<Gamma>\<turnstile> \<langle>B, Normal t\<rangle> \<Rightarrow> Fault f \<rbrakk> \<Longrightarrow> I t (Fault f) \<rbrakk>
|
||||
\<Longrightarrow> I s s'"
|
||||
apply (induct arbitrary: s rule: exec.induct; simp)
|
||||
apply (erule exec_elim_cases; fastforce simp: exec.WhileTrue exec.WhileFalse)
|
||||
done
|
||||
|
||||
lemma While_inv_from_body:
|
||||
"\<Gamma> \<turnstile> (G \<inter> C) B G \<Longrightarrow> \<Gamma> \<turnstile> G While C B G"
|
||||
apply (drule hoare_sound)+
|
||||
apply (rule hoare_complete)
|
||||
apply (clarsimp simp: cvalid_def HoarePartialDef.valid_def)
|
||||
by (erule exec_While_final_inv''[where I="\<lambda>s s'. s \<in> G \<longrightarrow> s' \<in> Normal ` G", THEN impE],
|
||||
fastforce+)
|
||||
|
||||
end
|
||||
|
|
|
@ -460,7 +460,7 @@ text \<open>Methods to automate rewriting.\<close>
|
|||
|
||||
method do_rewrite uses hom ruleset declares C_simp_simps =
|
||||
(rule com_ctxt_focus_rewrite[OF hom], rule ruleset,
|
||||
#break "simpl_rewrite_rewrite", (simp add: C_simp_simps; fail))+
|
||||
#break "simpl_rewrite_rewrite", (not_visible \<open>simp add: C_simp_simps\<close>; fail))+
|
||||
|
||||
method rewrite_pre uses hom declares C_simp_pre C_simp_simps =
|
||||
(do_rewrite hom: hom ruleset: C_simp_pre)
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
#
|
||||
# SPDX-License-Identifier: BSD-2-Clause
|
||||
#
|
||||
|
||||
#
|
||||
# Wrapper for clang C preprocessor on MacOS
|
||||
#
|
||||
export L4CPP="-DTARGET=ARM -DTARGET_ARM -DPLATFORM=Sabre -DPLATFORM_Sabre"
|
||||
|
||||
llvm-gcc -Wno-invalid-pp-token -E -x c $@
|
|
@ -10,9 +10,9 @@ default: images test
|
|||
test:
|
||||
all: images test
|
||||
|
||||
# Allow sorry command in AARCH64 Refine during development:
|
||||
# Allow sorry command in AARCH64 CRefine during development:
|
||||
ifeq "$(L4V_ARCH)" "AARCH64"
|
||||
export REFINE_QUICK_AND_DIRTY=1
|
||||
export CREFINE_QUICK_AND_DIRTY=1
|
||||
endif
|
||||
|
||||
#
|
||||
|
|
|
@ -4557,14 +4557,14 @@ lemma decodeVCPUWriteReg_ccorres:
|
|||
(invs' and (\<lambda>s. ksCurThread s = thread) and ct_active' and sch_act_simple
|
||||
and sysargs_rel args buffer
|
||||
and (valid_cap' (ArchObjectCap cp)) and K (isVCPUCap cp))
|
||||
(UNIV \<inter> {s. unat (length_' s) = length args}
|
||||
(UNIV \<inter> {s. unat (length___unsigned_long_' s) = length args}
|
||||
\<inter> {s. ccap_relation (ArchObjectCap cp) (cap_' s)}
|
||||
\<inter> {s. buffer_' s = option_to_ptr buffer}) hs
|
||||
(decodeVCPUWriteReg args cp
|
||||
>>= invocationCatch thread isBlocking isCall InvokeArchObject)
|
||||
(Call decodeVCPUWriteReg_'proc)"
|
||||
apply (rule ccorres_grab_asm)
|
||||
apply (cinit' lift: length_' cap_' buffer_' simp: decodeVCPUWriteReg_def Let_def)
|
||||
apply (cinit' lift: length___unsigned_long_' cap_' buffer_' simp: decodeVCPUWriteReg_def Let_def)
|
||||
apply (rule ccorres_Cond_rhs_Seq ; clarsimp)
|
||||
apply (rule_tac ccorres_gen_asm[where P="length args < 2"])
|
||||
apply clarsimp
|
||||
|
@ -4688,7 +4688,7 @@ lemma decodeVCPUInjectIRQ_ccorres:
|
|||
and sysargs_rel args buffer
|
||||
and (valid_cap' (ArchObjectCap cp))
|
||||
and K (isVCPUCap cp))
|
||||
(UNIV \<inter> {s. unat (length_' s) = length args}
|
||||
(UNIV \<inter> {s. unat (length___unsigned_long_' s) = length args}
|
||||
\<inter> {s. ccap_relation (ArchObjectCap cp) (cap_' s)}
|
||||
\<inter> {s. buffer_' s = option_to_ptr buffer}
|
||||
) hs
|
||||
|
@ -4696,7 +4696,7 @@ lemma decodeVCPUInjectIRQ_ccorres:
|
|||
>>= invocationCatch thread isBlocking isCall InvokeArchObject)
|
||||
(Call decodeVCPUInjectIRQ_'proc)"
|
||||
apply (rule ccorres_grab_asm)
|
||||
apply (cinit' lift: length_' cap_' buffer_'
|
||||
apply (cinit' lift: length___unsigned_long_' cap_' buffer_'
|
||||
simp: decodeVCPUInjectIRQ_def Let_def shiftL_nat )
|
||||
apply csymbr
|
||||
apply csymbr
|
||||
|
@ -4904,14 +4904,14 @@ lemma decodeVCPUReadReg_ccorres:
|
|||
(invs' and (\<lambda>s. ksCurThread s = thread) and ct_active' and sch_act_simple
|
||||
and sysargs_rel args buffer
|
||||
and (valid_cap' (ArchObjectCap cp)))
|
||||
(UNIV \<inter> {s. unat (length_' s) = length args}
|
||||
(UNIV \<inter> {s. unat (length___unsigned_long_' s) = length args}
|
||||
\<inter> {s. ccap_relation (ArchObjectCap cp) (cap_' s)}
|
||||
\<inter> {s. buffer_' s = option_to_ptr buffer}
|
||||
\<inter> \<lbrace>\<acute>call = from_bool isCall \<rbrace>) hs
|
||||
(decodeVCPUReadReg args cp
|
||||
>>= invocationCatch thread isBlocking isCall InvokeArchObject)
|
||||
(Call decodeVCPUReadReg_'proc)"
|
||||
apply (cinit' lift: length_' cap_' buffer_' call_')
|
||||
apply (cinit' lift: length___unsigned_long_' cap_' buffer_' call_')
|
||||
apply (clarsimp simp: decodeVCPUReadReg_def Let_def)
|
||||
apply (rule ccorres_Cond_rhs_Seq)
|
||||
apply (simp add: throwError_bind invocationCatch_def invocation_eq_use_types
|
||||
|
@ -5120,7 +5120,7 @@ lemma decodeVCPUAckVPPI_ccorres:
|
|||
and sysargs_rel args buffer
|
||||
and (valid_cap' (ArchObjectCap cp))
|
||||
and K (isVCPUCap cp))
|
||||
(UNIV \<inter> {s. unat (length_' s) = length args}
|
||||
(UNIV \<inter> {s. unat (length___unsigned_long_' s) = length args}
|
||||
\<inter> {s. ccap_relation (ArchObjectCap cp) (cap_' s)}
|
||||
\<inter> {s. buffer_' s = option_to_ptr buffer}
|
||||
) hs
|
||||
|
@ -5142,7 +5142,7 @@ proof -
|
|||
|
||||
show ?thesis
|
||||
apply (rule ccorres_grab_asm)
|
||||
apply (cinit' lift: length_' cap_' buffer_')
|
||||
apply (cinit' lift: length___unsigned_long_' cap_' buffer_')
|
||||
apply (clarsimp simp: decodeVCPUAckVPPI_def)
|
||||
apply (csymbr, rename_tac cp')
|
||||
apply csymbr
|
||||
|
@ -5241,7 +5241,7 @@ lemma decodeARMVCPUInvocation_ccorres:
|
|||
and (valid_cap' (ArchObjectCap cp)))
|
||||
(UNIV \<comment> \<open>whoever wrote the C code decided to name this arbitrarily differently from other functions\<close>
|
||||
\<inter> {s. label___unsigned_long_' s = label}
|
||||
\<inter> {s. unat (length_' s) = length args}
|
||||
\<inter> {s. unat (length___unsigned_long_' s) = length args}
|
||||
\<inter> {s. slot_' s = cte_Ptr slot}
|
||||
\<inter> {s. current_extra_caps_' (globals s) = extraCaps'}
|
||||
\<inter> {s. ccap_relation (ArchObjectCap cp) (cap_' s)}
|
||||
|
@ -5250,7 +5250,7 @@ lemma decodeARMVCPUInvocation_ccorres:
|
|||
(decodeARMVCPUInvocation label args cptr slot cp extraCaps
|
||||
>>= invocationCatch thread isBlocking isCall InvokeArchObject)
|
||||
(Call decodeARMVCPUInvocation_'proc)"
|
||||
apply (cinit' lift: label___unsigned_long_' length_' slot_' current_extra_caps_'
|
||||
apply (cinit' lift: label___unsigned_long_' length___unsigned_long_' slot_' current_extra_caps_'
|
||||
cap_' buffer_' call_')
|
||||
apply (clarsimp simp: decodeARMVCPUInvocation_def)
|
||||
|
||||
|
|
|
@ -2422,21 +2422,6 @@ next
|
|||
done
|
||||
qed
|
||||
|
||||
(* FIXME: move *)
|
||||
lemma ccorres_to_vcg_nf:
|
||||
"\<lbrakk>ccorres rrel xf P P' [] a c; no_fail Q a; \<And>s. P s \<Longrightarrow> Q s\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma>\<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> rf_sr} c
|
||||
{s. \<exists>(rv, \<sigma>')\<in>fst (a \<sigma>). (\<sigma>', s) \<in> rf_sr \<and> rrel rv (xf s)}"
|
||||
apply (rule HoarePartial.conseq_exploit_pre)
|
||||
apply clarsimp
|
||||
apply (rule conseqPre)
|
||||
apply (drule ccorres_to_vcg')
|
||||
prefer 2
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply clarsimp
|
||||
done
|
||||
|
||||
lemma mdb_node_get_mdbNext_heap_ccorres:
|
||||
"ccorres (=) ret__unsigned_' \<top> UNIV hs
|
||||
(liftM (mdbNext \<circ> cteMDBNode) (getCTE parent))
|
||||
|
|
|
@ -2225,21 +2225,6 @@ next
|
|||
done
|
||||
qed
|
||||
|
||||
(* FIXME: move *)
|
||||
lemma ccorres_to_vcg_nf:
|
||||
"\<lbrakk>ccorres rrel xf P P' [] a c; no_fail Q a; \<And>s. P s \<Longrightarrow> Q s\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma>\<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> rf_sr} c
|
||||
{s. \<exists>(rv, \<sigma>')\<in>fst (a \<sigma>). (\<sigma>', s) \<in> rf_sr \<and> rrel rv (xf s)}"
|
||||
apply (rule HoarePartial.conseq_exploit_pre)
|
||||
apply clarsimp
|
||||
apply (rule conseqPre)
|
||||
apply (drule ccorres_to_vcg')
|
||||
prefer 2
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply clarsimp
|
||||
done
|
||||
|
||||
lemma mdb_node_get_mdbNext_heap_ccorres:
|
||||
"ccorres (=) ret__unsigned_longlong_' \<top> UNIV hs
|
||||
(liftM (mdbNext \<circ> cteMDBNode) (getCTE parent))
|
||||
|
|
|
@ -2771,21 +2771,6 @@ next
|
|||
done
|
||||
qed
|
||||
|
||||
(* FIXME: move *)
|
||||
lemma ccorres_to_vcg_nf:
|
||||
"\<lbrakk>ccorres rrel xf P P' [] a c; no_fail Q a; \<And>s. P s \<Longrightarrow> Q s\<rbrakk>
|
||||
\<Longrightarrow> \<Gamma>\<turnstile> {s. P \<sigma> \<and> s \<in> P' \<and> (\<sigma>, s) \<in> rf_sr} c
|
||||
{s. \<exists>(rv, \<sigma>')\<in>fst (a \<sigma>). (\<sigma>', s) \<in> rf_sr \<and> rrel rv (xf s)}"
|
||||
apply (rule HoarePartial.conseq_exploit_pre)
|
||||
apply clarsimp
|
||||
apply (rule conseqPre)
|
||||
apply (drule ccorres_to_vcg')
|
||||
prefer 2
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply clarsimp
|
||||
done
|
||||
|
||||
lemma mdb_node_get_mdbNext_heap_ccorres:
|
||||
"ccorres (=) ret__unsigned_longlong_' \<top> UNIV hs
|
||||
(liftM (mdbNext \<circ> cteMDBNode) (getCTE parent))
|
||||
|
|
|
@ -934,10 +934,10 @@ lemma terminates_spec_no_fail:
|
|||
using normal by auto
|
||||
show ?thesis
|
||||
apply (clarsimp simp: ac AC_call_L1_def L2_call_L1_def)
|
||||
apply (wpsimp wp: select_f_L1_call_simpl_no_fail no_fail_select
|
||||
wp_del: select_f_wp)
|
||||
apply (rule hoare_strengthen_post[OF select_f_L1_call_simpl_rv], fastforce)
|
||||
apply (wpsimp wp: nf_pre)+
|
||||
apply (wpsimp wp_del: select_f_wp)
|
||||
apply (rule hoare_strengthen_post[OF select_f_L1_call_simpl_rv], fastforce)
|
||||
apply (wpsimp wp: select_f_L1_call_simpl_no_fail no_fail_select)+
|
||||
apply (fastforce simp: nf_pre)
|
||||
done
|
||||
qed
|
||||
|
||||
|
|
|
@ -193,23 +193,20 @@ lemma handleInterrupt_no_fail:
|
|||
|
||||
lemma handleEvent_Interrupt_no_fail: "no_fail (invs' and ex_abs einvs) (handleEvent Interrupt)"
|
||||
apply (simp add: handleEvent_def)
|
||||
apply (rule no_fail_pre)
|
||||
apply wp
|
||||
apply (rule handleInterrupt_no_fail)
|
||||
apply (simp add: crunch_simps)
|
||||
apply (rule_tac Q="\<lambda>r s. ex_abs (einvs) s \<and> invs' s \<and>
|
||||
(\<forall>irq. r = Some irq
|
||||
\<longrightarrow> intStateIRQTable (ksInterruptState s) irq \<noteq> irqstate.IRQInactive)"
|
||||
in hoare_strengthen_post)
|
||||
apply (rule hoare_vcg_conj_lift)
|
||||
apply (rule corres_ex_abs_lift)
|
||||
apply (rule dmo_getActiveIRQ_corres)
|
||||
apply wp
|
||||
apply simp
|
||||
apply wp
|
||||
apply simp
|
||||
apply (rule doMachineOp_getActiveIRQ_IRQ_active)
|
||||
apply clarsimp
|
||||
apply (rule handleInterrupt_no_fail)
|
||||
apply (simp add: crunch_simps)
|
||||
apply (rule_tac Q="\<lambda>r s. ex_abs (einvs) s \<and> invs' s \<and>
|
||||
(\<forall>irq. r = Some irq
|
||||
\<longrightarrow> intStateIRQTable (ksInterruptState s) irq \<noteq> irqstate.IRQInactive)"
|
||||
in hoare_strengthen_post)
|
||||
apply (rule hoare_vcg_conj_lift)
|
||||
apply (rule corres_ex_abs_lift)
|
||||
apply (rule dmo_getActiveIRQ_corres)
|
||||
apply wpsimp
|
||||
apply (wpsimp wp: doMachineOp_getActiveIRQ_IRQ_active)
|
||||
apply clarsimp
|
||||
apply wpsimp
|
||||
apply (clarsimp simp: invs'_def valid_state'_def)
|
||||
done
|
||||
|
||||
|
|
|
@ -271,8 +271,7 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]:
|
|||
lemma no_fail_invalidateCacheRange_RAM[simp, wp]:
|
||||
"no_fail \<top> (invalidateCacheRange_RAM s e p)"
|
||||
apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def)
|
||||
apply (rule no_fail_pre, wp no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb, simp)
|
||||
apply (auto intro: hoare_post_taut)
|
||||
apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb)
|
||||
done
|
||||
|
||||
lemma no_fail_branchFlushRange[simp, wp]:
|
||||
|
|
|
@ -280,8 +280,7 @@ lemma no_fail_invalidateCacheRange_I[simp, wp]:
|
|||
lemma no_fail_invalidateCacheRange_RAM[simp, wp]:
|
||||
"no_fail \<top> (invalidateCacheRange_RAM s e p)"
|
||||
apply (simp add: invalidateCacheRange_RAM_def lineStart_def cacheLineBits_def)
|
||||
apply (rule no_fail_pre, wp no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb, simp)
|
||||
apply (auto intro: hoare_post_taut)
|
||||
apply (wpsimp wp: no_fail_invalidateL2Range no_fail_invalidateByVA no_fail_dsb)
|
||||
done
|
||||
|
||||
lemma no_fail_branchFlushRange[simp, wp]:
|
||||
|
|
|
@ -1575,11 +1575,6 @@ lemma corres_assert_ret:
|
|||
apply (simp add: assert_def return_def fail_def)
|
||||
done
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres dc P Q (f ()) g
|
||||
\<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_assume_r:
|
||||
"corres dc P Q f (g ())
|
||||
\<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
|
||||
|
|
|
@ -1599,9 +1599,8 @@ lemma no_fail_asUser [wp]:
|
|||
"no_fail \<top> f \<Longrightarrow> no_fail (tcb_at' t) (asUser t f)"
|
||||
apply (simp add: asUser_def split_def)
|
||||
apply wp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wp hoare_drop_imps)
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+
|
||||
done
|
||||
|
||||
lemma asUser_setRegister_corres:
|
||||
|
|
|
@ -1454,11 +1454,6 @@ lemma corres_assert_ret:
|
|||
apply (simp add: assert_def return_def fail_def)
|
||||
done
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres dc P Q (f ()) g
|
||||
\<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_assume_r:
|
||||
"corres dc P Q f (g ())
|
||||
\<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
|
||||
|
|
|
@ -1534,9 +1534,8 @@ lemma no_fail_asUser [wp]:
|
|||
"no_fail \<top> f \<Longrightarrow> no_fail (tcb_at' t) (asUser t f)"
|
||||
apply (simp add: asUser_def split_def)
|
||||
apply wp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wp hoare_drop_imps)
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+
|
||||
done
|
||||
|
||||
lemma asUser_setRegister_corres:
|
||||
|
|
|
@ -341,9 +341,7 @@ lemma invokeTCB_WriteRegisters_corres:
|
|||
apply (rule asUser_corres)
|
||||
apply (simp add: zipWithM_mapM getRestartPC_def setNextPC_def)
|
||||
apply (rule corres_Id, simp+)
|
||||
apply (rule no_fail_pre, wp no_fail_mapM)
|
||||
apply clarsimp
|
||||
apply (wp no_fail_setRegister | simp)+
|
||||
apply (wpsimp wp: no_fail_mapM no_fail_setRegister)+
|
||||
apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]])
|
||||
apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]])
|
||||
apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]])
|
||||
|
|
|
@ -1550,11 +1550,6 @@ lemma corres_assert_ret:
|
|||
apply (simp add: assert_def return_def fail_def)
|
||||
done
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres dc P Q (f ()) g
|
||||
\<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_assume_r:
|
||||
"corres dc P Q f (g ())
|
||||
\<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
|
||||
|
|
|
@ -1581,9 +1581,8 @@ lemma no_fail_asUser [wp]:
|
|||
"no_fail \<top> f \<Longrightarrow> no_fail (tcb_at' t) (asUser t f)"
|
||||
apply (simp add: asUser_def split_def)
|
||||
apply wp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wp hoare_drop_imps)
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+
|
||||
done
|
||||
|
||||
lemma asUser_setRegister_corres:
|
||||
|
|
|
@ -1419,11 +1419,6 @@ lemma corres_assert_ret:
|
|||
apply (simp add: assert_def return_def fail_def)
|
||||
done
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres dc P Q (f ()) g
|
||||
\<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_assume_r:
|
||||
"corres dc P Q f (g ())
|
||||
\<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
|
||||
|
|
|
@ -1568,9 +1568,8 @@ lemma no_fail_asUser [wp]:
|
|||
"no_fail \<top> f \<Longrightarrow> no_fail (tcb_at' t) (asUser t f)"
|
||||
apply (simp add: asUser_def split_def)
|
||||
apply wp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wp hoare_drop_imps)
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+
|
||||
done
|
||||
|
||||
lemma asUser_setRegister_corres:
|
||||
|
|
|
@ -329,8 +329,8 @@ lemma invokeTCB_WriteRegisters_corres:
|
|||
apply (clarsimp simp: mask_def user_vtop_def
|
||||
cong: if_cong)
|
||||
apply simp
|
||||
apply (rule no_fail_pre, wp no_fail_mapM)
|
||||
apply (clarsimp, (wp no_fail_setRegister | simp)+)
|
||||
apply (wpsimp wp: no_fail_mapM no_fail_setRegister)
|
||||
apply simp
|
||||
apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]])
|
||||
apply (rule corres_split_nor[OF corres_when[OF refl restart_corres]])
|
||||
apply (rule corres_split_nor[OF corres_when[OF refl rescheduleRequired_corres]])
|
||||
|
|
|
@ -1448,10 +1448,7 @@ lemma no_fail_nativeThreadUsingFPU[wp]:
|
|||
"no_fail (\<top> and \<top>) (X64.nativeThreadUsingFPU thread)"
|
||||
supply Collect_const[simp del]
|
||||
apply (simp only: X64.nativeThreadUsingFPU_def)
|
||||
apply (rule no_fail_bind)
|
||||
apply (simp add: Arch.no_fail_machine_op_lift)
|
||||
apply simp
|
||||
apply wp
|
||||
apply (wpsimp wp: Arch.no_fail_machine_op_lift)
|
||||
done
|
||||
|
||||
lemma (in delete_one) prepareThreadDelete_corres:
|
||||
|
|
|
@ -1418,11 +1418,6 @@ lemma corres_assert_ret:
|
|||
apply (simp add: assert_def return_def fail_def)
|
||||
done
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres dc P Q (f ()) g
|
||||
\<Longrightarrow> corres dc (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_assume_r:
|
||||
"corres dc P Q f (g ())
|
||||
\<Longrightarrow> corres dc P (Q and (\<lambda>s. Q')) f (assert Q' >>= g)"
|
||||
|
|
|
@ -1561,9 +1561,8 @@ lemma no_fail_asUser [wp]:
|
|||
"no_fail \<top> f \<Longrightarrow> no_fail (tcb_at' t) (asUser t f)"
|
||||
apply (simp add: asUser_def split_def)
|
||||
apply wp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wp hoare_drop_imps)
|
||||
apply simp
|
||||
apply (simp add: no_fail_def)
|
||||
apply (wpsimp wp: hoare_drop_imps no_fail_threadGet)+
|
||||
done
|
||||
|
||||
lemma asUser_setRegister_corres:
|
||||
|
|
|
@ -344,7 +344,7 @@ lemma invokeTCB_WriteRegisters_corres:
|
|||
mask_def user_vtop_def
|
||||
cong: if_cong)
|
||||
apply simp
|
||||
apply (rule no_fail_pre, wp no_fail_mapM)
|
||||
apply (wpsimp wp: no_fail_mapM no_fail_setRegister)
|
||||
apply (clarsimp simp: sanitiseOrFlags_def sanitiseAndFlags_def)
|
||||
apply ((safe)[1], (wp no_fail_setRegister | simp)+)
|
||||
apply (rule corres_split_nor[OF asUser_postModifyRegisters_corres[simplified]])
|
||||
|
|
11
run_tests
11
run_tests
|
@ -64,12 +64,10 @@ EXCLUDE["RISCV64"]=[
|
|||
EXCLUDE["AARCH64"]=[
|
||||
# To be eliminated/refined as development progresses
|
||||
"ASepSpec",
|
||||
"CKernel",
|
||||
"CBaseRefine",
|
||||
"CRefine",
|
||||
"Access",
|
||||
|
||||
# Tools and unrelated content, removed for development
|
||||
"CParser",
|
||||
"AutoCorres",
|
||||
"CamkesGlueSpec",
|
||||
"Sep_Algebra",
|
||||
|
@ -83,7 +81,8 @@ EXCLUDE["AARCH64"]=[
|
|||
"DSpec",
|
||||
"DBaseRefine",
|
||||
"CamkesGlueProofs",
|
||||
"AsmRefine"
|
||||
"AsmRefine",
|
||||
"SimplExportAndRefine"
|
||||
]
|
||||
|
||||
# Check EXCLUDE is exhaustive over the available architectures
|
||||
|
@ -123,7 +122,9 @@ returncode = 0
|
|||
for arch in archs:
|
||||
features = os.environ.get("L4V_FEATURES", "")
|
||||
plat = os.environ.get("L4V_PLAT", "")
|
||||
print(f"Testing for L4V_ARCH='{arch}', L4V_FEATURES='{features}', L4V_PLAT='{plat}':")
|
||||
num_domains = os.environ.get("INPUT_NUM_DOMAINS", "")
|
||||
print(f"Testing for L4V_ARCH='{arch}', L4V_FEATURES='{features}', L4V_PLAT='{plat}', "
|
||||
f"INPUT_NUM_DOMAINS='{num_domains}'")
|
||||
|
||||
os.environ["L4V_ARCH"] = arch
|
||||
# Provide L4V_ARCH_IS_ARM for Corres_Test in lib/ROOT
|
||||
|
|
|
@ -113,48 +113,7 @@ definition pte_base_addr :: "pte \<Rightarrow> paddr" where
|
|||
definition ppn_from_pptr :: "obj_ref \<Rightarrow> ppn" where
|
||||
"ppn_from_pptr p = ucast (addrFromPPtr p >> pageBits)"
|
||||
|
||||
definition vs_index_bits :: nat where
|
||||
"vs_index_bits \<equiv> if config_ARM_PA_SIZE_BITS_40 then 10 else (9::nat)"
|
||||
|
||||
lemma vs_index_bits_ge0[simp, intro!]: "0 < vs_index_bits"
|
||||
by (simp add: vs_index_bits_def)
|
||||
|
||||
(* A dependent-ish type in Isabelle. We use typedef here instead of value_type so that we can
|
||||
retain a symbolic value (vs_index_bits) for the size of the type instead of getting a plain
|
||||
number such as 9 or 10. *)
|
||||
typedef vs_index_len = "{n :: nat. n < vs_index_bits}" by auto
|
||||
|
||||
end
|
||||
|
||||
instantiation AARCH64_A.vs_index_len :: len0
|
||||
begin
|
||||
interpretation Arch .
|
||||
definition len_of_vs_index_len: "len_of (x::vs_index_len itself) \<equiv> CARD(vs_index_len)"
|
||||
instance ..
|
||||
end
|
||||
|
||||
instantiation AARCH64_A.vs_index_len :: len
|
||||
begin
|
||||
interpretation Arch .
|
||||
instance
|
||||
proof
|
||||
show "0 < LENGTH(vs_index_len)"
|
||||
by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len])
|
||||
qed
|
||||
end
|
||||
|
||||
context Arch begin global_naming AARCH64_A
|
||||
|
||||
type_synonym vs_index = "vs_index_len word"
|
||||
|
||||
type_synonym pt_index_len = 9
|
||||
type_synonym pt_index = "pt_index_len word"
|
||||
|
||||
text \<open>Sanity check:\<close>
|
||||
lemma length_vs_index_len[simp]:
|
||||
"LENGTH(vs_index_len) = vs_index_bits"
|
||||
by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len])
|
||||
|
||||
(* Sanity check for page table type sizes -- ptTranslationBits not yet available at definition site *)
|
||||
lemma vs_index_ptTranslationBits:
|
||||
"ptTranslationBits VSRootPT_T = LENGTH(vs_index_len)"
|
||||
by (simp add: ptTranslationBits_def vs_index_bits_def)
|
||||
|
|
|
@ -0,0 +1,131 @@
|
|||
(*
|
||||
* Copyright 2023, Proofcraft Pty Ltd
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-only
|
||||
*)
|
||||
|
||||
theory Kernel_C
|
||||
imports
|
||||
"ExecSpec.MachineTypes"
|
||||
"CLib.CTranslationNICTA"
|
||||
"AsmRefine.CommonOps"
|
||||
begin
|
||||
|
||||
external_file
|
||||
"../c/build/$L4V_ARCH/kernel_all.c_pp"
|
||||
|
||||
context begin interpretation Arch .
|
||||
|
||||
requalify_types
|
||||
machine_state
|
||||
pt_array_len
|
||||
vs_array_len
|
||||
|
||||
end
|
||||
|
||||
declare [[populate_globals=true]]
|
||||
|
||||
context begin interpretation Arch . (*FIXME: arch_split*)
|
||||
|
||||
(* Sanity checks for array sizes. ptTranslationBits not yet available at definition site. *)
|
||||
lemma ptTranslationBits_vs_index_bits:
|
||||
"ptTranslationBits VSRootPT_T = vs_index_bits"
|
||||
by (simp add: ptTranslationBits_def vs_index_bits_def)
|
||||
|
||||
(* FIXME AARCH64: this is guaranteed to always succeed even though config_ARM_PA_SIZE_BITS_40
|
||||
is unfolded. It'd be nicer if we could also get something symbolic out of value_type, though *)
|
||||
lemma ptTranslationBits_vs_array_len':
|
||||
"2 ^ ptTranslationBits VSRootPT_T = vs_array_len"
|
||||
by (simp add: vs_array_len_def ptTranslationBits_vs_index_bits vs_index_bits_def
|
||||
Kernel_Config.config_ARM_PA_SIZE_BITS_40_def)
|
||||
|
||||
lemmas ptTranslationBits_vs_array_len = ptTranslationBits_vs_array_len'[unfolded vs_array_len_def]
|
||||
|
||||
type_synonym cghost_state =
|
||||
"(machine_word \<rightharpoonup> vmpage_size) \<times> \<comment> \<open>Frame sizes\<close>
|
||||
(machine_word \<rightharpoonup> nat) \<times> \<comment> \<open>CNode sizes\<close>
|
||||
(machine_word \<rightharpoonup> pt_type) \<times> \<comment> \<open>PT types\<close>
|
||||
ghost_assertions" \<comment> \<open>ASMRefine assertions\<close>
|
||||
|
||||
definition gs_clear_region :: "addr \<Rightarrow> nat \<Rightarrow> cghost_state \<Rightarrow> cghost_state" where
|
||||
"gs_clear_region ptr bits gs \<equiv>
|
||||
(\<lambda>x. if x \<in> {ptr..+2 ^ bits} then None else fst gs x,
|
||||
\<lambda>x. if x \<in> {ptr..+2 ^ bits} then None else fst (snd gs) x,
|
||||
\<lambda>x. if x \<in> {ptr..+2 ^ bits} then None else fst (snd (snd gs)) x,
|
||||
snd (snd (snd gs)))"
|
||||
|
||||
definition gs_new_frames:: "vmpage_size \<Rightarrow> addr \<Rightarrow> nat \<Rightarrow> cghost_state \<Rightarrow> cghost_state" where
|
||||
"gs_new_frames sz ptr bits \<equiv> \<lambda>gs.
|
||||
if bits < pageBitsForSize sz then gs
|
||||
else (\<lambda>x. if \<exists>n\<le>mask (bits - pageBitsForSize sz).
|
||||
x = ptr + n * 2 ^ pageBitsForSize sz then Some sz
|
||||
else fst gs x, snd gs)"
|
||||
|
||||
definition gs_new_cnodes:: "nat \<Rightarrow> addr \<Rightarrow> nat \<Rightarrow> cghost_state \<Rightarrow> cghost_state" where
|
||||
"gs_new_cnodes sz ptr bits \<equiv> \<lambda>gs.
|
||||
if bits < sz + 4 then gs
|
||||
else (fst gs, \<lambda>x. if \<exists>n\<le>mask (bits - sz - 4). x = ptr + n * 2 ^ (sz + 4)
|
||||
then Some sz
|
||||
else fst (snd gs) x, snd (snd gs))"
|
||||
|
||||
definition gs_new_pt_t:: "pt_type \<Rightarrow> addr \<Rightarrow> cghost_state \<Rightarrow> cghost_state" where
|
||||
"gs_new_pt_t pt_t ptr \<equiv>
|
||||
\<lambda>gs. (fst gs, fst (snd gs), (fst (snd (snd gs))) (ptr \<mapsto> pt_t), snd (snd (snd gs)))"
|
||||
|
||||
abbreviation gs_get_assn :: "int \<Rightarrow> cghost_state \<Rightarrow> machine_word" where
|
||||
"gs_get_assn k \<equiv> ghost_assertion_data_get k (snd \<circ> snd \<circ> snd)"
|
||||
|
||||
abbreviation gs_set_assn :: "int \<Rightarrow> machine_word \<Rightarrow> cghost_state \<Rightarrow> cghost_state" where
|
||||
"gs_set_assn k v \<equiv> ghost_assertion_data_set k v (apsnd \<circ> apsnd \<circ> apsnd)"
|
||||
|
||||
declare [[record_codegen = false]]
|
||||
declare [[allow_underscore_idents = true]]
|
||||
|
||||
end
|
||||
|
||||
(* Workaround for the fact that the retype annotations need the vmpage sizes*)
|
||||
(* create appropriately qualified aliases *)
|
||||
context begin interpretation Arch . global_naming vmpage_size
|
||||
requalify_consts ARMSmallPage ARMLargePage ARMHugePage
|
||||
end
|
||||
|
||||
(* Also need pt_type constructors for retype annotations. We leave them available globally for C. *)
|
||||
context begin interpretation Arch .
|
||||
requalify_consts NormalPT_T VSRootPT_T
|
||||
end
|
||||
|
||||
definition
|
||||
ctcb_size_bits :: nat
|
||||
where
|
||||
"ctcb_size_bits \<equiv> 10"
|
||||
|
||||
definition
|
||||
ctcb_offset :: "64 word"
|
||||
where
|
||||
"ctcb_offset \<equiv> 2 ^ ctcb_size_bits"
|
||||
|
||||
lemmas ctcb_offset_defs = ctcb_offset_def ctcb_size_bits_def
|
||||
|
||||
cond_sorry_modifies_proofs SORRY_MODIFIES_PROOFS
|
||||
|
||||
install_C_file "../c/build/$L4V_ARCH/kernel_all.c_pp"
|
||||
[machinety=machine_state, ghostty=cghost_state]
|
||||
|
||||
text \<open>Hide unqualified names conflicting with Kernel_Config names. Force use of Kernel_C prefix
|
||||
for these:\<close>
|
||||
hide_const (open)
|
||||
numDomains
|
||||
|
||||
(* hide vmpage sizes again *)
|
||||
hide_const
|
||||
vmpage_size.ARMSmallPage
|
||||
vmpage_size.ARMLargePage
|
||||
vmpage_size.ARMHugePage
|
||||
|
||||
(* re-allow fully qualified accesses (for consistency). Slightly clunky *)
|
||||
context Arch begin
|
||||
global_naming "AARCH64.vmpage_size" requalify_consts ARMSmallPage ARMLargePage ARMHugePage
|
||||
global_naming "AARCH64" requalify_consts ARMSmallPage ARMLargePage ARMHugePage
|
||||
end
|
||||
|
||||
end
|
|
@ -108,6 +108,10 @@ ${OVERLAY}: ${DEFAULT_OVERLAY}
|
|||
@cp $< $@
|
||||
endif
|
||||
|
||||
ifdef INPUT_NUM_DOMAINS
|
||||
KERNEL_CMAKE_EXTRA_OPTIONS += -DKernelNumDomains=${INPUT_NUM_DOMAINS}
|
||||
endif
|
||||
|
||||
# Initialize the CMake build. We purge the build directory and start again
|
||||
# whenever any of the kernel sources change, so that we can reliably pick up
|
||||
# changes to the build config.
|
||||
|
@ -148,7 +152,6 @@ ${CONFIG_DONE}: ${KERNEL_DEPS} gen-config-thy.py ${OVERLAY}
|
|||
${KERNEL_CMAKE_OPTIMISATION} ${KERNEL_CMAKE_EXTRA_OPTIONS} \
|
||||
${OVERLAY_OPT} \
|
||||
-G Ninja ${SOURCE_ROOT}
|
||||
cd ${KERNEL_CONFIG_ROOT} && ninja gen_config/kernel/gen_config.json
|
||||
@touch ${CONFIG_DONE}
|
||||
ifneq ($(L4V_ARCH),X64)
|
||||
@if [ "$$(diff -q ${OVERLAY} ${DEFAULT_OVERLAY})" ]; then \
|
||||
|
|
|
@ -101,5 +101,64 @@ definition irqVTimerEvent :: irq where
|
|||
definition pageColourBits :: nat where
|
||||
"pageColourBits \<equiv> undefined" \<comment> \<open>not implemented on this platform\<close>
|
||||
|
||||
|
||||
section \<open>Page table sizes\<close>
|
||||
|
||||
definition vs_index_bits :: nat where
|
||||
"vs_index_bits \<equiv> if config_ARM_PA_SIZE_BITS_40 then 10 else (9::nat)"
|
||||
|
||||
end
|
||||
|
||||
(* Need to declare code equation outside Arch locale *)
|
||||
declare AARCH64.vs_index_bits_def[code]
|
||||
|
||||
context Arch begin global_naming AARCH64
|
||||
|
||||
lemma vs_index_bits_ge0[simp, intro!]: "0 < vs_index_bits"
|
||||
by (simp add: vs_index_bits_def)
|
||||
|
||||
(* A dependent-ish type in Isabelle. We use typedef here instead of value_type so that we can
|
||||
retain a symbolic value (vs_index_bits) for the size of the type instead of getting a plain
|
||||
number such as 9 or 10. *)
|
||||
typedef vs_index_len = "{n :: nat. n < vs_index_bits}" by auto
|
||||
|
||||
end
|
||||
|
||||
instantiation AARCH64.vs_index_len :: len0
|
||||
begin
|
||||
interpretation Arch .
|
||||
definition len_of_vs_index_len: "len_of (x::vs_index_len itself) \<equiv> CARD(vs_index_len)"
|
||||
instance ..
|
||||
end
|
||||
|
||||
instantiation AARCH64.vs_index_len :: len
|
||||
begin
|
||||
interpretation Arch .
|
||||
instance
|
||||
proof
|
||||
show "0 < LENGTH(vs_index_len)"
|
||||
by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len])
|
||||
qed
|
||||
end
|
||||
|
||||
context Arch begin global_naming AARCH64
|
||||
|
||||
type_synonym vs_index = "vs_index_len word"
|
||||
|
||||
type_synonym pt_index_len = 9
|
||||
type_synonym pt_index = "pt_index_len word"
|
||||
|
||||
text \<open>Sanity check:\<close>
|
||||
lemma length_vs_index_len[simp]:
|
||||
"LENGTH(vs_index_len) = vs_index_bits"
|
||||
by (simp add: len_of_vs_index_len type_definition.card[OF type_definition_vs_index_len])
|
||||
|
||||
|
||||
section \<open>C array sizes corresponding to page table sizes\<close>
|
||||
|
||||
value_type pt_array_len = "(2::nat) ^ LENGTH(pt_index_len)"
|
||||
value_type vs_array_len = "(2::nat) ^ vs_index_bits"
|
||||
|
||||
end
|
||||
|
||||
end
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
(*
|
||||
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
theory ArchSetup
|
||||
imports
|
||||
"CLib.CTranslationNICTA"
|
||||
begin
|
||||
|
||||
abbreviation (input)
|
||||
"(arch_load_machine_word
|
||||
(load_word32 :: word32 mem_read)
|
||||
(load_word64 :: word64 mem_read)
|
||||
:: machine_word mem_read)
|
||||
\<equiv> load_word64"
|
||||
|
||||
abbreviation (input)
|
||||
"(arch_store_machine_word
|
||||
(store_word32 :: word32 mem_upd)
|
||||
(store_word64 :: word64 mem_upd)
|
||||
:: machine_word mem_upd)
|
||||
\<equiv> store_word64"
|
||||
|
||||
abbreviation (input)
|
||||
"(arch_machine_word_constructor
|
||||
(from_word32 :: word32 \<Rightarrow> 'a)
|
||||
(from_word64 :: word64 \<Rightarrow> 'a)
|
||||
:: machine_word \<Rightarrow> 'a)
|
||||
\<equiv> from_word64"
|
||||
|
||||
end
|
|
@ -15,8 +15,8 @@ in [Isabelle/HOL][1]. In particular, it uses Norrish's
|
|||
abstracts the result to produce a result that is (hopefully)
|
||||
more pleasant to reason about.
|
||||
|
||||
[1]: https://www.cl.cam.ac.uk/research/hvg/Isabelle/
|
||||
[2]: https://trustworthy.systems/software/TS/c-parser/
|
||||
[1]: https://isabelle.in.tum.de
|
||||
[2]: https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -69,8 +69,8 @@ text \<open>
|
|||
|
||||
As mentioned earlier, AutoCorres does not handle C code directly. The first
|
||||
step is to apply the
|
||||
C-Parser\footnote{\url{https://trustworthy.systems/software/TS/c-parser}} to
|
||||
obtain a SIMPL translation. We do this using the \texttt{install-C-file}
|
||||
C-Parser\footnote{\url{https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md}}
|
||||
to obtain a SIMPL translation. We do this using the \texttt{install-C-file}
|
||||
command in Isabelle, as shown.
|
||||
|
||||
\<close>
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
title = {{C-to-Isabelle} Parser, version 1.13.0},
|
||||
year = 2013,
|
||||
month = may,
|
||||
url = {https://trustworthy.systems/software/TS/c-parser/},
|
||||
url = {https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md},
|
||||
note = {Accessed May 2016}
|
||||
}
|
||||
|
||||
|
@ -44,7 +44,7 @@
|
|||
journal = {Archive of Formal Proofs},
|
||||
month = feb,
|
||||
year = 2008,
|
||||
url = {https://www.isa-afp.org/entries/Simpl.shtml},
|
||||
url = {https://www.isa-afp.org/entries/Simpl.html},
|
||||
note = {Formal proof development},
|
||||
ISSN = {2150-914X},
|
||||
}
|
||||
|
@ -55,7 +55,7 @@
|
|||
journal = {Archive of Formal Proofs},
|
||||
month = may,
|
||||
year = 2012,
|
||||
url = {https://www.isa-afp.org/entries/Separation_Algebra.shtml},
|
||||
url = {https://www.isa-afp.org/entries/Separation_Algebra.html},
|
||||
note = {Formal proof development},
|
||||
ISSN = {2150-914x},
|
||||
}
|
||||
|
|
|
@ -47,7 +47,7 @@ proof (induct n arbitrary: m rule: less_induct)
|
|||
|
||||
show "no_ofail (\<lambda>_. unat x < m) (factorial' m x)"
|
||||
apply (subst factorial'.simps)
|
||||
apply (wp induct_asm ovalid_post_triv)
|
||||
apply (wpsimp wp: induct_asm ovalid_post_triv)
|
||||
apply unat_arith
|
||||
done
|
||||
qed
|
||||
|
|
|
@ -200,9 +200,10 @@ with TempDir(cleanup=(not args.no_cleanup)) as base_dir:
|
|||
shutil.copyfile(f_src, f_dest)
|
||||
|
||||
# Copy various other files.
|
||||
shutil.copyfile(
|
||||
os.path.join(args.repository, 'lib', 'Word_Lib', 'ROOT'),
|
||||
os.path.join(target_dir, 'lib', 'Word_Lib', 'ROOT'))
|
||||
for session in ['Basics', 'Eisbach_Tools', 'ML_Utils', 'Monads', 'Word_Lib']:
|
||||
shutil.copyfile(
|
||||
os.path.join(args.repository, 'lib', session, 'ROOT'),
|
||||
os.path.join(target_dir, 'lib', session, 'ROOT'))
|
||||
shutil.copyfile(
|
||||
os.path.join(release_files_dir, "ROOT.release"),
|
||||
os.path.join(target_dir, "autocorres", "ROOT"))
|
||||
|
@ -222,37 +223,11 @@ with TempDir(cleanup=(not args.no_cleanup)) as base_dir:
|
|||
os.path.join(args.repository, "LICENSES"),
|
||||
os.path.join(target_dir, "LICENSES"))
|
||||
|
||||
# Extract dependent sessions in lib. FIXME: rather kludgy
|
||||
print('Extracting sessions from lib/ROOT...')
|
||||
|
||||
# Set up ROOT for the tests dir, for the thydeps tool
|
||||
subprocess.check_call(
|
||||
['make', 'tests/ROOT'],
|
||||
cwd=os.path.join(args.repository, 'tools', 'autocorres'))
|
||||
|
||||
lib_sessions = ['Lib', 'CLib']
|
||||
lib_ROOT = os.path.join(args.repository, 'lib', 'ROOT')
|
||||
with open(lib_ROOT, 'r') as lib_root:
|
||||
data = lib_root.read()
|
||||
# Split out session specs. Assume ROOT file has standard indentation.
|
||||
chunks = data.split('\nsession ')
|
||||
# This will have the license header, etc.
|
||||
header = chunks[0]
|
||||
# Remaining sections. Try to remove comments
|
||||
sessions = ['session ' + re.sub(r'\(\*.*?\*\)', '', x, flags=re.DOTALL)
|
||||
for x in chunks[1:]]
|
||||
|
||||
new_root = header
|
||||
wanted_specs = {}
|
||||
for wanted in lib_sessions:
|
||||
spec = [spec for spec in sessions if spec.startswith('session %s ' % wanted)]
|
||||
if len(spec) != 1:
|
||||
print('error: %s session not found in %r' % (wanted, lib_ROOT))
|
||||
new_root += '\n' + spec[0]
|
||||
|
||||
with open(os.path.join(target_dir, 'lib', 'ROOT'), 'w') as root_f:
|
||||
root_f.write(new_root)
|
||||
|
||||
# For the examples, generate ".thy" files where appropriate, and also
|
||||
# generate an "All.thy" which contains all the examples.
|
||||
def gen_thy_file(c_file):
|
||||
|
|
|
@ -33,6 +33,7 @@ exception_rewrite.ML:
|
|||
Proof frameworks and code to rewrite monadic specifications to
|
||||
avoid using exceptions where possible.
|
||||
|
||||
NatBitwise.thy:
|
||||
WordAbstract.thy:
|
||||
word_abstract.ML:
|
||||
Word abstraction framework and theorems.
|
||||
|
|
|
@ -1,19 +1,21 @@
|
|||
Core Development Team
|
||||
---------------------
|
||||
Core Developers
|
||||
---------------
|
||||
|
||||
David Greenaway (inactive)
|
||||
|
||||
Japheth Lim <Japheth.Lim@data61.csiro.au>
|
||||
Japheth Lim (inactive)
|
||||
|
||||
Gerwin Klein (maintenance)
|
||||
|
||||
Contributions
|
||||
-------------
|
||||
|
||||
Lars Noschinski <noschinl@in.tum.de>
|
||||
Lars Noschinski
|
||||
|
||||
"owhile" definitions and related rules, as well as many other
|
||||
contributions to the proof libraries.
|
||||
|
||||
Matthew Brecknell <Matthew.Brecknell@data61.csiro.au>
|
||||
Matthew Brecknell (inactive)
|
||||
|
||||
Maintenance; integration with seL4's C refinement framework.
|
||||
|
||||
|
|
|
@ -1,43 +1,55 @@
|
|||
AutoCorres 1.9 (31 October 2022)
|
||||
--------------
|
||||
AutoCorres Change Log
|
||||
=====================
|
||||
|
||||
* Isabelle2021-1 edition of both AutoCorres and the C parser.
|
||||
AutoCorres 1.10 (3 Nov 2023)
|
||||
----------------------------
|
||||
|
||||
* Isabelle2023 edition of both AutoCorres and the C parser.
|
||||
|
||||
* Restructured and cleaned up monad libraries. Removed dependencies
|
||||
on unrelated l4v libraries.
|
||||
|
||||
AutoCorres 1.9 (31 October 2022)
|
||||
--------------------------------
|
||||
|
||||
* Isabelle2021-1 edition of both AutoCorres and the C parser.
|
||||
|
||||
AutoCorres 1.8 (31 October 2021)
|
||||
--------------
|
||||
--------------------------------
|
||||
|
||||
* Isabelle2021 edition of both AutoCorres and the C parser.
|
||||
* Isabelle2021 edition of both AutoCorres and the C parser.
|
||||
|
||||
AutoCorres 1.7 (2 November 2020)
|
||||
--------------
|
||||
--------------------------------
|
||||
|
||||
* Isabelle2020 edition of both AutoCorres and the C parser.
|
||||
* Isabelle2020 edition of both AutoCorres and the C parser.
|
||||
|
||||
* Slight updates to wp: use "wp (once)" instead of "wp_once"
|
||||
* Slight updates to wp: use "wp (once)" instead of "wp_once"
|
||||
|
||||
AutoCorres 1.6.1 (3 October 2019)
|
||||
----------------
|
||||
* Correct license for a C parser file. No code changes.
|
||||
---------------------------------
|
||||
|
||||
* Correct license for a C parser file. No code changes.
|
||||
|
||||
AutoCorres 1.6 (5 September 2019)
|
||||
--------------
|
||||
----------------------------------
|
||||
|
||||
* Isabelle2019 edition of both AutoCorres and the C parser.
|
||||
* Isabelle2019 edition of both AutoCorres and the C parser.
|
||||
|
||||
* Word abstraction has been extended to C bitwise operators.
|
||||
* Word abstraction has been extended to C bitwise operators.
|
||||
|
||||
AutoCorres 1.5 (10 September 2018)
|
||||
--------------
|
||||
----------------------------------
|
||||
|
||||
* Isabelle2018 edition of both AutoCorres and the C parser.
|
||||
* Isabelle2018 edition of both AutoCorres and the C parser.
|
||||
|
||||
AutoCorres 1.4 (2 March 2018)
|
||||
--------------
|
||||
-----------------------------
|
||||
|
||||
* Isabelle2017 edition of both AutoCorres and the C parser.
|
||||
|
||||
AutoCorres 1.3 (3 April 2017)
|
||||
--------------
|
||||
-----------------------------
|
||||
|
||||
* Isabelle2016-1 edition of both AutoCorres and the C parser.
|
||||
|
||||
|
@ -46,7 +58,7 @@ AutoCorres 1.3 (3 April 2017)
|
|||
must be selected using L4V_ARCH environment variable.
|
||||
|
||||
AutoCorres 1.2 (31 March 2016)
|
||||
--------------
|
||||
------------------------------
|
||||
|
||||
* Isabelle2016 edition of both AutoCorres and the C parser.
|
||||
|
||||
|
@ -59,7 +71,7 @@ AutoCorres 1.2 (31 March 2016)
|
|||
* Several minor bug fixes and improvements.
|
||||
|
||||
AutoCorres 1.1 (9 Oct 2015)
|
||||
--------------
|
||||
---------------------------
|
||||
|
||||
* Isabelle2015 edition of both AutoCorres and the C parser.
|
||||
|
||||
|
@ -77,7 +89,7 @@ AutoCorres 1.1 (9 Oct 2015)
|
|||
|
||||
|
||||
AutoCorres 1.0 (16 Dec 2014)
|
||||
--------------
|
||||
----------------------------
|
||||
|
||||
* New option “no_opt” to turn off simplifier stages. (Experimental)
|
||||
|
||||
|
|
|
@ -7,8 +7,8 @@ in [Isabelle/HOL][1]. In particular, it uses Norrish's
|
|||
abstracts the result to produce a result that is (hopefully)
|
||||
more pleasant to reason about.
|
||||
|
||||
[1]: https://www.cl.cam.ac.uk/research/hvg/Isabelle/
|
||||
[2]: https://trustworthy.systems/software/TS/c-parser/
|
||||
[1]: https://isabelle.in.tum.de/
|
||||
[2]: https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md
|
||||
|
||||
|
||||
|
||||
|
@ -28,7 +28,7 @@ Contents of this README
|
|||
Installation
|
||||
------------
|
||||
|
||||
AutoCorres is packaged as a theory for Isabelle2021:
|
||||
AutoCorres is packaged as a theory for Isabelle2023:
|
||||
|
||||
https://isabelle.in.tum.de
|
||||
|
||||
|
@ -107,12 +107,12 @@ This package contains:
|
|||
|
||||
* Michael Norrish's C parser, used to translate C code into Isabelle:
|
||||
|
||||
https://trustworthy.systems/software/TS/c-parser/
|
||||
https://github.com/seL4/l4v/blob/master/tools/c-parser/README.md
|
||||
|
||||
* Norbert Schirmer's Simpl language and associated VCG tool. The
|
||||
C parser translates C into Schirmer's Simpl language:
|
||||
|
||||
https://www.isa-afp.org/entries/Simpl.shtml
|
||||
https://www.isa-afp.org/entries/Simpl.html
|
||||
|
||||
* Code from SML/NJ, including an implementation of binary sets
|
||||
(Binaryset.ML) and the mllex and mlyacc tools
|
||||
|
|
|
@ -8,15 +8,14 @@
|
|||
session AutoCorres = CParser +
|
||||
sessions
|
||||
"HOL-Eisbach"
|
||||
Lib
|
||||
CLib
|
||||
Monads
|
||||
theories
|
||||
"DataStructures"
|
||||
"AutoCorres"
|
||||
|
||||
session AutoCorresTest in "tests" = AutoCorres +
|
||||
sessions
|
||||
"HOL-Number_Theory"
|
||||
AutoCorres
|
||||
directories
|
||||
"parse-tests"
|
||||
"proof-tests"
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
lib
|
||||
lib/Basics
|
||||
lib/Eisbach_Tools
|
||||
lib/ML_Utils
|
||||
lib/Monads
|
||||
lib/Word_Lib
|
||||
c-parser
|
||||
autocorres
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
NB: These instructions apply to the stand-alone release of the C parser.
|
||||
If this is in an L4.verified checkout, see the top-level README.md instead.
|
||||
|
||||
This code requires Isabelle2021 and the MLton SML compiler.
|
||||
This code requires Isabelle2023 and the MLton SML compiler.
|
||||
|
||||
The C parser supports multiple target architectures:
|
||||
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
<!--
|
||||
Copyright 2024, Proofcraft Pty Ltd
|
||||
Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
|
||||
SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
|
@ -7,31 +8,125 @@
|
|||
The StrictC translation tool
|
||||
============================
|
||||
|
||||
To install, see the file INSTALL in the `src/c-parser` directory.
|
||||
The StrictC translation tool, also called the Isabelle C parser, translates a
|
||||
large subset of C99 code into the imperative language [Simpl] embedded in the
|
||||
theorem prover [Isabelle/HOL]. The Simpl language provides a verified
|
||||
verification condition generator and further tools for software verification.
|
||||
The tool is aimed at Isabelle/HOL experts with knowledge in program
|
||||
verification, reasoning in [Isabelle/HOL], Hoare logic, and C semantics.
|
||||
|
||||
The semantics the parser produces contains a C memory model that can be used to
|
||||
reason about the behaviour of low-level C programs. The memory model admits more
|
||||
behaviours than the C standard -- in particular, it does not require that memory
|
||||
be allocated via `alloc`, because this library function does typically not yet
|
||||
exist in low-level code such as OS kernel implementation.
|
||||
|
||||
To install, we recommend using one of the [releases] provided below and see the
|
||||
file INSTALL in the `src/c-parser` directory. You will need Isabelle and the
|
||||
[MLton compiler] for Standard ML.
|
||||
|
||||
To use:
|
||||
|
||||
1. Use the heap CParser that is created by installation
|
||||
2. Import the theory CTranslation
|
||||
1. Use the Isabelle session heap `CParser` that is created by installation
|
||||
2. Import the theory `CTranslation`
|
||||
3. Load ('install') C files into your theories with the Isar command
|
||||
`install_C_file`.
|
||||
|
||||
See `docs/ctranslation.pdf` for more information about the options
|
||||
and C language semantics that this tool provides.
|
||||
The [AutoCorres] tool can abstract and simplify most Simpl C code generated by
|
||||
the parser and is aimed at easing C verification. See the [AutoCorres] web page
|
||||
for more information.
|
||||
|
||||
See also the examples in the testfiles directory. For example,
|
||||
`breakcontinue.thy` is a fairly involved demonstration of doing things
|
||||
the hard way.
|
||||
[Isabelle/HOL]: https://isabelle.in.tum.de
|
||||
[Simpl]: https://isa-afp.org/entries/Simpl.html
|
||||
[releases]: #releases
|
||||
[testfiles]: testfiles/
|
||||
[breakcontinue]: testfiles/breakcontinue.thy
|
||||
[MLton compiler]: http://mlton.org
|
||||
[AutoCorres]: https://trustworthy.systems/projects/OLD/autocorres/
|
||||
|
||||
----------------------------------------------------------------------
|
||||
The translation tool builds on various open source projects by others.
|
||||
Documentation
|
||||
-------------
|
||||
|
||||
The [releases] contain the file `docs/ctranslation.pdf` which provides more
|
||||
information about the options and C language semantics that this tool provides.
|
||||
|
||||
See also the examples in the [testfiles] directory. For example,
|
||||
[`breakcontinue.thy`][breakcontinue] is a fairly involved demonstration of doing
|
||||
things the hard way.
|
||||
|
||||
|
||||
The following academic publications describe the C parser, C subset, and memory
|
||||
model:
|
||||
|
||||
- Harvey Tuch, Gerwin Klein, Micheal Norrish. [Types, bytes, and separation logic][1].
|
||||
POPL'07, pages 97-108, ACM, 2007.
|
||||
DOI [10.1145/1190215.1190234](https://doi.org/10.1145/1190215.1190234).
|
||||
- Harvey Tuch. [Formal memory models for verifying C systems code][2]. PhD thesis,
|
||||
University of New South Wales, 2008.
|
||||
DOI [10.26190/unsworks/17927](https://doi.org/10.26190/unsworks/17927).
|
||||
|
||||
[1]: https://trustworthy.systems/publications/papers/Tuch_KN_07.abstract
|
||||
[2]: https://trustworthy.systems/publications/papers/Tuch:phd.abstract
|
||||
|
||||
Supported C Subset
|
||||
------------------
|
||||
|
||||
The C parser supports a large subset of C99. The following C features are not
|
||||
supported:
|
||||
|
||||
- taking the address of local variables
|
||||
- floating point types
|
||||
- side effects in expressions, pre-increment and pre-decrement operators
|
||||
- `goto`
|
||||
- switch fall-through cases
|
||||
- variadic argument lists (`...`)
|
||||
- `static` local variables
|
||||
- limited support for function pointers
|
||||
|
||||
Releases
|
||||
--------
|
||||
|
||||
Current release:
|
||||
|
||||
- [c-parser-1.20.tar.gz][1.20] (Released 2023-11-03, Isabelle 2023)
|
||||
|
||||
Older releases:
|
||||
|
||||
- [c-parser-1.19.tar.gz][1.19] (Released 2022-10-31, Isabelle 2021-1)
|
||||
- [c-parser-1.18.tar.gz][1.18] (Released 2021-10-31, Isabelle 2021)
|
||||
- [c-parser-1.17.tar.gz][1.17] (Released 2020-11-02, Isabelle 2020)
|
||||
- [c-parser-1.16.1.tar.gz][1.16.1] (Released 2019-10-03, Isabelle 2019)
|
||||
- [c-parser-1.16.0.tar.gz][1.16.0] (Released 2019-09-05, Isabelle 2019)
|
||||
- [c-parser-1.15.0.tar.gz][1.15] (Released 2018-09-05, Isabelle 2018)
|
||||
- [c-parser-1.14.0.tar.gz][1.14] (Released 2018-03-02, Isabelle 2017)
|
||||
- [c-parser-1.13.0.tar.gz][1.13] (Released 2013-05-06, Isabelle 2013)
|
||||
- [c-parser-1.12.0.tar.gz][1.12] (Released 2012-12-05, Isabelle 2012)
|
||||
- [c-parser-1.0.tar.gz][1.0] (Released 2012-09-24, Isabelle 2011-1)
|
||||
|
||||
[1.20]: https://github.com/seL4/l4v/releases/download/autocorres-1.10/c-parser-1.20.tar.gz
|
||||
[1.19]: https://github.com/seL4/l4v/releases/download/autocorres-1.9/c-parser-1.19.tar.gz
|
||||
[1.18]: https://github.com/seL4/l4v/releases/download/autocorres-1.8/c-parser-1.18.tar.gz
|
||||
[1.17]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.17.tar.gz
|
||||
[1.16.1]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.16.1.tar.gz
|
||||
[1.16.0]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.16.0.tar.gz
|
||||
[1.15]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.15.0.tar.gz
|
||||
[1.14]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.14.0.tar.gz
|
||||
[1.13]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.13.0.tar.gz
|
||||
[1.12]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.12.0.tar.gz
|
||||
[1.0]: https://c-parser.s3.us-east-2.amazonaws.com/c-parser-1.0.tar.gz
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
The StrictC translation tool itself is available under the BSD 2-Clause license.
|
||||
It builds on the following open source projects by others.
|
||||
|
||||
1. Norbert Schirmer's Simpl language and associated VCG tool.
|
||||
|
||||
Sources for this are found in the Simpl/ directory. The
|
||||
code is covered by an LGPL licence.
|
||||
Sources for this are found in the [Simpl/](Simpl/) directory. The
|
||||
code is covered by the LGPL licence.
|
||||
|
||||
See <https://isa-afp.org/entries/Simpl.shtml>
|
||||
See <https://isa-afp.org/entries/Simpl.html>
|
||||
|
||||
2. Code from SML/NJ:
|
||||
- an implementation of binary sets (Binaryset.ML)
|
||||
|
|
|
@ -156,3 +156,9 @@
|
|||
##<decl_type>: <name>
|
||||
|
||||
e.g. `##Function: ctzl`
|
||||
|
||||
## 1.20
|
||||
|
||||
- Builds with Isabelle2023
|
||||
- Rearranged library session structure and included more libraries for heap
|
||||
reasoning in the release. See e.g. files TypHeapLib.thy and LemmaBucket_C.thy
|
||||
|
|
|
@ -10,4 +10,4 @@ Simpl
|
|||
This directory contains Norbert Schirmer's Simpl language and associated VCG
|
||||
tool. The code is covered by an LGPL licence.
|
||||
|
||||
See <http://afp.sourceforge.net/entries/Simpl.shtml>
|
||||
See <https://www.isa-afp.org/entries/Simpl.html>
|
||||
|
|
|
@ -145,7 +145,9 @@ fun get_Csyntax thy s = let
|
|||
val cpp_option =
|
||||
case Config.get_global thy cpp_path of
|
||||
"" => NONE
|
||||
| s => SOME s
|
||||
| s => SOME (if Path.is_absolute (Path.explode s)
|
||||
then s
|
||||
else Path.implode (Path.append (Resources.master_directory thy) (Path.explode s)))
|
||||
val cpp_error_count = Config.get_global thy report_cpp_errors
|
||||
val (ast0, _) =
|
||||
StrictCParser.parse
|
||||
|
|
|
@ -32,7 +32,8 @@ die ()
|
|||
if [ $# -ne 1 ]
|
||||
then
|
||||
echo "Usage:" >&2
|
||||
die " $0 tag" >&2
|
||||
echo " $0 <c-parser-tag-version>"
|
||||
die "e.g. $0 1.20" >&2
|
||||
fi
|
||||
|
||||
# Get the directory that this script is running in.
|
||||
|
@ -109,27 +110,17 @@ done
|
|||
|
||||
# other misc files
|
||||
cp -v lib/Word_Lib/ROOT "$outputdir/src/lib/Word_Lib/"
|
||||
cp -v lib/Basics/ROOT "$outputdir/src/lib/Basics/"
|
||||
cp -v lib/ML_Utils/ROOT "$outputdir/src/lib/ML_Utils/"
|
||||
|
||||
echo "Creating ROOTS file"
|
||||
cat >"$outputdir/src/ROOTS" <<EOF
|
||||
lib/Word_Lib
|
||||
lib
|
||||
lib/Basics
|
||||
lib/ML_Utils
|
||||
c-parser
|
||||
EOF
|
||||
|
||||
echo "Adding ROOT file for Lib session"
|
||||
cat > "$outputdir/src/lib/ROOT" <<EOF
|
||||
(*
|
||||
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
session Lib = HOL +
|
||||
directories "ml-helpers"
|
||||
theories "ML_Utils"
|
||||
EOF
|
||||
|
||||
echo "Rearranging directories"
|
||||
/bin/mv -v "$outputdir/src/tools/c-parser/README.md" "$outputdir"
|
||||
/bin/mv -v "$outputdir/src/tools/c-parser" "$outputdir/src/"
|
||||
|
@ -183,7 +174,6 @@ sed '
|
|||
' < standalone-parser/Makefile > "$outputdir/src/c-parser/standalone-parser/Makefile"
|
||||
popd > /dev/null
|
||||
|
||||
|
||||
echo "Making PDF of ctranslation file."
|
||||
cd "$outputdir/src/c-parser/doc"
|
||||
make ctranslation.pdf > /dev/null
|
||||
|
@ -192,22 +182,6 @@ mv ctranslation.pdf "$outputdir/doc"
|
|||
|
||||
popd > /dev/null
|
||||
|
||||
lookforlicense=$(find "$outputdir" \! -name '*.lex.sml' \! -name '*.grm.sml' \! -type d -exec grep -q @LICENSE \{\} \; -print)
|
||||
if [ -n "$lookforlicense" ]
|
||||
then
|
||||
die "### @LICENSE detected in file(s) $lookforlicense"
|
||||
else
|
||||
echo "No @LICENSEs remain unexpanded - good."
|
||||
fi
|
||||
|
||||
lookformichaeln=$(find "$outputdir" \! -name RELEASES.md \! -type d -exec grep -q /michaeln \{\} \; -print)
|
||||
if [ -n "$lookformichaeln" ]
|
||||
then
|
||||
die "### /michaeln detected in file(s) $lookformichaeln"
|
||||
else
|
||||
echo "No occurrences of \"/michaeln\" - good."
|
||||
fi
|
||||
|
||||
echo -n "Compressing into $stem.tar.gz: "
|
||||
mv "$tmpdir/c-parser" "$tmpdir/$stem"
|
||||
|
||||
|
|
Loading…
Reference in New Issue