Compare commits
283 Commits
c443a1238b
...
d667a05cd2
Author | SHA1 | Date |
---|---|---|
Achim D. Brucker | d667a05cd2 | |
Achim D. Brucker | eca211810d | |
Achim D. Brucker | b61b2c370a | |
Achim D. Brucker | 582d7750e8 | |
Achim D. Brucker | b2a3aba5fe | |
Achim D. Brucker | e3359077a6 | |
Achim D. Brucker | 51f3d8fe82 | |
Gerwin Klein | 76dbb29e7e | |
Gerwin Klein | a2696127d0 | |
Gerwin Klein | ea6477ba94 | |
Gerwin Klein | 18dd0deb8a | |
Gerwin Klein | 591735d3e8 | |
Gerwin Klein | ffd76022d4 | |
Michael McInerney | 8e9ba18e00 | |
Gerwin Klein | 47fa26c8a8 | |
Michael McInerney | 5dd241022a | |
Michael McInerney | 0f999d6d39 | |
Michael McInerney | 1def78d062 | |
Michael McInerney | 6b4c27b59b | |
Michael McInerney | bcf6e90a04 | |
Michael McInerney | e4e232ae25 | |
Michael McInerney | f65463b1b1 | |
Michael McInerney | 3cf0afabc9 | |
Michael McInerney | a7f9a415e6 | |
Michael McInerney | 92ad82ff79 | |
Michael McInerney | be7ac0ee39 | |
Michael McInerney | 31446bf80c | |
Michael McInerney | 63dd1bb027 | |
Michael McInerney | 6110badfda | |
Michael McInerney | f1f9a4f0be | |
Michael McInerney | 2c6a4f1162 | |
Michael McInerney | 7ac9d467bb | |
Michael McInerney | 24f0608bc3 | |
Michael McInerney | debc3e3ac6 | |
Michael McInerney | 738dc0630a | |
Michael McInerney | 65dca4e274 | |
Gerwin Klein | 91fc2ab9c3 | |
Gerwin Klein | 037b453b2b | |
Gerwin Klein | e4d5bf1d41 | |
Gerwin Klein | 8420d4d90f | |
Gerwin Klein | c8ea846955 | |
Gerwin Klein | 594da13093 | |
Gerwin Klein | 6311fa370a | |
Michael McInerney | b3b26b2d4e | |
Rafal Kolanski | c8f82f20f2 | |
Gerwin Klein | d2002eacb3 | |
Gerwin Klein | 0c9d55149a | |
Gerwin Klein | b21b5e52af | |
Gerwin Klein | d6a43f64e0 | |
Gerwin Klein | 17d9815846 | |
Gerwin Klein | 279493f917 | |
Gerwin Klein | 8ee47e5b34 | |
Gerwin Klein | 7a57e038a3 | |
Gerwin Klein | 74008a7eb2 | |
Gerwin Klein | 40bd416d92 | |
Gerwin Klein | 1870cfda97 | |
Michael McInerney | 55b9b4ab78 | |
Michael McInerney | 19b3695e74 | |
Michael McInerney | 2019f90735 | |
Michael McInerney | 6e94659ec1 | |
Corey Lewis | 7bbb331fc6 | |
Gerwin Klein | f53fb0dfbf | |
Gerwin Klein | 32cfb0a3b6 | |
Rafal Kolanski | 96c48ebb1e | |
Rafal Kolanski | 9bea43dbe5 | |
Rafal Kolanski | c10c358665 | |
Gerwin Klein | 27ae0304bd | |
Gerwin Klein | 69a8399f04 | |
Gerwin Klein | e94d95e6dc | |
Gerwin Klein | a0bf53768d | |
Gerwin Klein | e1d8f72b75 | |
Gerwin Klein | b28a62b8b6 | |
Gerwin Klein | aa5220f8d9 | |
Gerwin Klein | 51debe74b7 | |
Achim D. Brucker | e1ae5ffd8f | |
Achim D. Brucker | d6c735bddd | |
Achim D. Brucker | 884f47b1b3 | |
Gerwin Klein | eb8d926486 | |
Gerwin Klein | 60c403a193 | |
Gerwin Klein | 5a7a162df5 | |
Gerwin Klein | 760eb1cda8 | |
Gerwin Klein | 20948e6636 | |
Gerwin Klein | 7632e115ec | |
Michael McInerney | b0474bdb18 | |
Gerwin Klein | 4add4bdbf2 | |
Michael McInerney | 57f60084e8 | |
Michael McInerney | c3d54ae139 | |
Michael McInerney | 754528f1bf | |
Michael McInerney | 0160613c1a | |
Michael McInerney | a8d4685081 | |
Michael McInerney | 648674b351 | |
Michael McInerney | 62fa6c7832 | |
Michael McInerney | a4707d76bb | |
Michael McInerney | 9fc9facf73 | |
Michael McInerney | ea1306b4f7 | |
Michael McInerney | 4b53e8f5ed | |
Michael McInerney | 65eb0ef069 | |
Michael McInerney | 94fbb8c5e3 | |
Michael McInerney | a182869701 | |
Michael McInerney | af11638955 | |
Michael McInerney | 682942dc8d | |
Michael McInerney | a2ae053a70 | |
Michael McInerney | 267093778e | |
Michael McInerney | 75cf325222 | |
Michael McInerney | d9c3bdf645 | |
Michael McInerney | f9a46ee0c2 | |
Gerwin Klein | f8582b67a3 | |
Gerwin Klein | 33531132e9 | |
Gerwin Klein | d9191c22b0 | |
Gerwin Klein | d8c497e879 | |
Gerwin Klein | 34c3ff1b1b | |
Gerwin Klein | a9ae2e51ad | |
Gerwin Klein | 09ae378184 | |
Michael McInerney | c852e35015 | |
Rafal Kolanski | 4841f184cc | |
Gerwin Klein | 7d915a6dfd | |
Gerwin Klein | 4848ebae49 | |
Gerwin Klein | 9aafd8d84e | |
Gerwin Klein | b4e8ce37c4 | |
Gerwin Klein | fb57ddde8e | |
Gerwin Klein | 1e65b74ab8 | |
Gerwin Klein | 1a783b93c9 | |
Gerwin Klein | 51b7c53e15 | |
Gerwin Klein | fc9821e337 | |
Gerwin Klein | 20b09e19a2 | |
Gerwin Klein | f6702ed92b | |
Michael McInerney | cd22fc388f | |
Michael McInerney | 8549a7fbc2 | |
Michael McInerney | 81eaa5c7f0 | |
Michael McInerney | 5992b66f87 | |
Corey Lewis | c99f9bf400 | |
Gerwin Klein | 197b7980f8 | |
Gerwin Klein | 42767c69bf | |
Rafal Kolanski | e67513b920 | |
Rafal Kolanski | febf5312f2 | |
Rafal Kolanski | 0f4e5a5091 | |
Gerwin Klein | 34ec0ba6b7 | |
Gerwin Klein | ab92fb0de0 | |
Gerwin Klein | 0f1e9fc3fe | |
Gerwin Klein | 902e75f5a2 | |
Gerwin Klein | f4a47b97f2 | |
Gerwin Klein | f8320364d1 | |
Gerwin Klein | 5c021c3789 | |
Gerwin Klein | ee07f0ccf3 | |
Achim D. Brucker | aab92eadfd | |
Gerwin Klein | a3ef59d713 | |
Gerwin Klein | 394de1638c | |
Gerwin Klein | bd41a79d9f | |
Gerwin Klein | 4ef04c5615 | |
Gerwin Klein | 701998bf68 | |
Gerwin Klein | 3af98697e7 | |
Michael McInerney | dbd3766ca8 | |
Gerwin Klein | b93e844205 | |
Michael McInerney | 1e3ab08f16 | |
Michael McInerney | 05b05ceb48 | |
Michael McInerney | b085396459 | |
Michael McInerney | 24f6286795 | |
Michael McInerney | 7e88cf59e4 | |
Michael McInerney | bfaf5d27a0 | |
Michael McInerney | dd1d17df54 | |
Michael McInerney | 0c6a7a5ce9 | |
Michael McInerney | dfae3cf988 | |
Michael McInerney | e75fe667b7 | |
Michael McInerney | a98838c734 | |
Michael McInerney | cbb20c9a5c | |
Michael McInerney | dee8d19343 | |
Michael McInerney | e3c4c17bc8 | |
Michael McInerney | 1c97cb2a61 | |
Michael McInerney | 2a5f5990a3 | |
Michael McInerney | c51d20bbb7 | |
Michael McInerney | 73b9897588 | |
Michael McInerney | 913e29a021 | |
Michael McInerney | fcf42c4dcb | |
Michael McInerney | 6bba5d10ed | |
Gerwin Klein | 84b6755eb2 | |
Gerwin Klein | 71a61cec45 | |
Gerwin Klein | feb1e56a49 | |
Gerwin Klein | 1a24861804 | |
Gerwin Klein | ee12ff5a99 | |
Gerwin Klein | b57d0066a1 | |
Gerwin Klein | 6d20b07581 | |
Michael McInerney | 01108bc46e | |
Rafal Kolanski | 7447b30c31 | |
Gerwin Klein | 4717d5d952 | |
Gerwin Klein | 2b6bf36cec | |
Gerwin Klein | 9b954c1966 | |
Gerwin Klein | 80ea5b064c | |
Gerwin Klein | 069870d1e7 | |
Gerwin Klein | 6f63d5fc78 | |
Gerwin Klein | 8b58389527 | |
Gerwin Klein | 23b41def89 | |
Gerwin Klein | cccb1202dd | |
Gerwin Klein | 6731671632 | |
Gerwin Klein | b94be66b3d | |
Michael McInerney | 56757f00d0 | |
Michael McInerney | 6915c6979d | |
Michael McInerney | 496de5d435 | |
Michael McInerney | 346ab9cf84 | |
Corey Lewis | 78b7fdbd5b | |
Gerwin Klein | 002daabe78 | |
Gerwin Klein | 2534e539e8 | |
Rafal Kolanski | 592d6e856d | |
Rafal Kolanski | 97bb100505 | |
Rafal Kolanski | c5ff4c846e | |
Gerwin Klein | 05a629ac64 | |
Gerwin Klein | 2c503e3609 | |
Gerwin Klein | 638b7813f6 | |
Gerwin Klein | 41af104278 | |
Gerwin Klein | fb58ae12d7 | |
Gerwin Klein | a5088463cb | |
Gerwin Klein | 1ee77d207c | |
Gerwin Klein | e8f9dab83c | |
Gerwin Klein | 8f5e6540de | |
Gerwin Klein | c84bb14362 | |
Gerwin Klein | f9359ead94 | |
Gerwin Klein | 4d8ad41f93 | |
Gerwin Klein | b6645c1a06 | |
Gerwin Klein | 4d7cc19ec0 | |
Michael McInerney | dd315e16dd | |
Gerwin Klein | 52b4ba5091 | |
Michael McInerney | afcbba9a6d | |
Michael McInerney | a463ca7a9a | |
Michael McInerney | 3255722e1a | |
Michael McInerney | 10f91566b1 | |
Michael McInerney | b6b89fe713 | |
Michael McInerney | a3eb113e75 | |
Michael McInerney | d5db58fbe9 | |
Michael McInerney | 4905a8ee97 | |
Michael McInerney | 5ac6180742 | |
Michael McInerney | 0b10a21b8c | |
Michael McInerney | 04d4575be5 | |
Michael McInerney | 45cde7049b | |
Michael McInerney | 7a14a48ba5 | |
Michael McInerney | 0cdce52f0b | |
Michael McInerney | 3c9065f0da | |
Michael McInerney | 11614b920e | |
Michael McInerney | 97e40a03bf | |
Michael McInerney | 918ec7a283 | |
Michael McInerney | ea9bfb0284 | |
Michael McInerney | 1e32077cf1 | |
Michael McInerney | dc62bfdfeb | |
Gerwin Klein | 449cfc702e | |
Gerwin Klein | e76c69ee07 | |
Gerwin Klein | 028c3e6241 | |
Gerwin Klein | 62f0e202ef | |
Gerwin Klein | 81840fe278 | |
Gerwin Klein | a828b996db | |
Gerwin Klein | 1278a8d06e | |
Michael McInerney | 0ba3f8e8af | |
Rafal Kolanski | d4f0e5a38b | |
Gerwin Klein | 03e4ef92a4 | |
Gerwin Klein | 4743f0bb57 | |
Gerwin Klein | de58596f23 | |
Gerwin Klein | 723620f084 | |
Gerwin Klein | 5a8dea120c | |
Gerwin Klein | a46083c659 | |
Gerwin Klein | 707e54672c | |
Gerwin Klein | 4e55740215 | |
Gerwin Klein | a9f492672c | |
Gerwin Klein | 7a4b08160c | |
Gerwin Klein | 1fa8d8c08f | |
Michael McInerney | b3c6df48a2 | |
Michael McInerney | 49ff8457f2 | |
Michael McInerney | 8c433c0850 | |
Michael McInerney | 82b9548170 | |
Corey Lewis | 8c59df4495 | |
Gerwin Klein | 5632323979 | |
Gerwin Klein | 1cce5b3ff7 | |
Rafal Kolanski | 402a8342db | |
Rafal Kolanski | 32a672412c | |
Rafal Kolanski | b2cd1ce4ad | |
Gerwin Klein | 67c0109b74 | |
Gerwin Klein | bd50264385 | |
Gerwin Klein | 4d00865673 | |
Gerwin Klein | 598e19dd63 | |
Gerwin Klein | 4cbfb4ab0b | |
Gerwin Klein | b2dd5db6d1 | |
Gerwin Klein | 03a045309e | |
Gerwin Klein | d0bab9c79f | |
Achim D. Brucker | 19c186ae5c | |
Achim D. Brucker | 4551b58ddb | |
Achim D. Brucker | 3ee2f3e284 | |
Achim D. Brucker | a57b87fc9e |
|
@ -37,6 +37,7 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64]
|
||||
num_domains: ['1', '']
|
||||
# test only most recent push:
|
||||
concurrency: l4v-regression-${{ github.ref }}-${{ strategy.job-index }}
|
||||
steps:
|
||||
|
@ -45,6 +46,7 @@ jobs:
|
|||
with:
|
||||
L4V_ARCH: ${{ matrix.arch }}
|
||||
xml: ${{ needs.code.outputs.xml }}
|
||||
NUM_DOMAINS: ${{ matrix.num_domains }}
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
@ -58,7 +60,7 @@ jobs:
|
|||
- name: Upload logs
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: logs-${{ matrix.arch }}
|
||||
name: logs-${{ matrix.num_domains }}-${{ matrix.arch }}
|
||||
path: logs.tar.xz
|
||||
|
||||
deploy:
|
||||
|
@ -77,6 +79,13 @@ jobs:
|
|||
token: ${{ secrets.PRIV_REPO_TOKEN }}
|
||||
tag: "l4v/proof-deploy/${{ github.event_name }}"
|
||||
|
||||
# Automatically rebase platform branches on pushes to master.
|
||||
# This workflow here on the master branch attempts a git rebase of the platform
|
||||
# branches listed in the build matrix below. If the rebase succeeds, the rebased
|
||||
# branch is pushed under the name `<branch>-rebased`. This triggers the build
|
||||
# workflow on the `<branch>-rebased` branch, which will run the proofs. If the
|
||||
# proofs succeed, the `<branch>-rebased` branch is force-pushed over
|
||||
# `<branch>`, becoming the new platform branch.
|
||||
rebase:
|
||||
name: Rebase platform branches
|
||||
runs-on: ubuntu-latest
|
||||
|
|
|
@ -18,11 +18,13 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
arch: [ARM, ARM_HYP, AARCH64, RISCV64, X64]
|
||||
num_domains: ['1', '7', '']
|
||||
steps:
|
||||
- name: Proofs
|
||||
uses: seL4/ci-actions/aws-proofs@master
|
||||
with:
|
||||
L4V_ARCH: ${{ matrix.arch }}
|
||||
NUM_DOMAINS: ${{ matrix.num_domains }}
|
||||
cache_read: '' # start with empty cache, but write cache back (default)
|
||||
env:
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
|
@ -38,7 +40,7 @@ jobs:
|
|||
- name: Upload logs
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: logs-${{ matrix.arch }}
|
||||
name: logs-${{ matrix.num_domains }}-${{ matrix.arch }}
|
||||
path: logs.tar.xz
|
||||
|
||||
binary-verification:
|
||||
|
|
201
README.md
201
README.md
|
@ -1,189 +1,48 @@
|
|||
<!--
|
||||
Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
# A Fork of AutoCorres
|
||||
|
||||
SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
-->
|
||||
This repository is a fork of the [seL4/l4v](https://github.com/seL4/l4v)
|
||||
repository. The main purpose is to maintain a local version of
|
||||
[AutoCorres](https://trustworthy.systems/projects/TS/autocorres/).
|
||||
|
||||
[![DOI][0]](http://dx.doi.org/10.5281/zenodo.591732)
|
||||
[![CI](https://github.com/seL4/l4v/actions/workflows/push.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/push.yml)
|
||||
[![Proofs](https://github.com/seL4/l4v/actions/workflows/proof-deploy.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/proof-deploy.yml)
|
||||
[![Weekly Clean](https://github.com/seL4/l4v/actions/workflows/weekly-clean.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/weekly-clean.yml)
|
||||
[![External](https://github.com/seL4/l4v/actions/workflows/external.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/external.yml)
|
||||
|
||||
MCS:\
|
||||
[![CI](https://github.com/seL4/l4v/actions/workflows/push.yml/badge.svg?branch=rt)](https://github.com/seL4/l4v/actions/workflows/push.yml)
|
||||
[![RT Proofs](https://github.com/seL4/l4v/actions/workflows/proof.yml/badge.svg?branch=rt)](https://github.com/seL4/l4v/actions/workflows/proof.yml)
|
||||
|
||||
[0]: https://zenodo.org/badge/doi/10.5281/zenodo.591732.svg
|
||||
Each feature changed with respect to the upstream repository is tracked on its
|
||||
own branch.
|
||||
|
||||
|
||||
[The L4.verified Proofs][1]
|
||||
===========================
|
||||
The main brach (called "master") contains all features from the individual
|
||||
feature branches described below:
|
||||
|
||||
This is the L4.verified git repository with formal specifications and
|
||||
proofs for the seL4 microkernel.
|
||||
## Active Branches
|
||||
|
||||
Most proofs in this repository are conducted in the interactive proof
|
||||
assistant [Isabelle/HOL][2]. For an introduction to Isabelle, see its
|
||||
[official website][2] and [documentation][3].
|
||||
### Branch "autogenerated_parser"
|
||||
|
||||
[1]: https://github.com/seL4/l4v "L4.verified Repository"
|
||||
[2]: http://isabelle.in.tum.de "Isabelle Website"
|
||||
[3]: http://isabelle.in.tum.de/documentation.html "Isabelle Documentation"
|
||||
This branch adds the C-parser files generated by mllex and mlyacc to reduce
|
||||
dependencies required by users of AutoCorres.
|
||||
|
||||
<a name="setup"></a>
|
||||
Setup
|
||||
-----
|
||||
### Branch "renaming_Word_Lib"
|
||||
|
||||
This repository is meant to be used as part of a Google [repo][5] setup. Instead
|
||||
of cloning it directly, please follow the directions for software dependencies
|
||||
and Isabelle installation in the [setup.md](docs/setup.md) file in the `docs`
|
||||
directory.
|
||||
This branch renames the session Word_Lib shipped by AutoCorres to Word_Lib_l4v to
|
||||
avoid a name clash with the AFP entry of same name. This allows one to use
|
||||
AutoCorres with the AFP being registered as an Isabelle component.
|
||||
|
||||
[5]: https://gerrit.googlesource.com/git-repo/+/HEAD/README.md
|
||||
### Branch "cpp_wrapper"
|
||||
|
||||
Contributing
|
||||
------------
|
||||
This branch adds an OS check to the cpp-wrapper script. This allows using the cpp wrapper
|
||||
on all supported operating systems, allowing a uniform ``declare [[cpp_path = "..."]]``
|
||||
across all supported operating systems.
|
||||
|
||||
Contributions to this repository are welcome.
|
||||
Please read [`CONTRIBUTING.md`](CONTRIBUTING.md) for details.
|
||||
### Branch "cpp_path_relative"
|
||||
|
||||
Overview
|
||||
--------
|
||||
This branch converts a relative ``cpp_path`` (e.g., ``declare [[cpp_path = "tools/cpp_wrapper"]]``
|
||||
into an absolute path, relative to the theory using ``install_C_file``. This minimized the need
|
||||
for local configuration that cannot be easily shared within a development team.
|
||||
|
||||
The repository is organised as follows.
|
||||
### Branch "umm_types_realtive"
|
||||
|
||||
* [`docs`](docs/): documentation on conventions, style, etc.
|
||||
Ensure that umm_types.txt is saved relative to theory file.
|
||||
|
||||
* [`spec`](spec/): a number of different formal specifications of seL4
|
||||
* [`abstract`](spec/abstract/): the functional abstract specification of seL4
|
||||
* [`sep-abstract`](spec/sep-abstract/): an abstract specification for a reduced
|
||||
version of seL4 that is configured as a separation kernel
|
||||
* [`haskell`](spec/haskell/): Haskell model of the seL4 kernel, kept in sync
|
||||
with the C code
|
||||
* [`machine`](spec/machine/): the machine interface of these two specifications
|
||||
* [`cspec`](spec/cspec/): the entry point for automatically translating the seL4 C code
|
||||
into Isabelle
|
||||
* [`capDL`](spec/capDL/): a specification of seL4 that abstracts from memory content and
|
||||
concrete execution behaviour, modelling the protection state of the
|
||||
system in terms of capabilities. This specification corresponds to the
|
||||
capability distribution language *capDL* that can be used to initialise
|
||||
user-level systems on top of seL4.
|
||||
* [`take-grant`](spec/take-grant/): a formalisation of the classical take-grant security
|
||||
model, applied to seL4, but not connected to the code of seL4.
|
||||
## Obsolete Branches
|
||||
|
||||
* There are additional specifications that are not tracked in this repository,
|
||||
but are generated from other files:
|
||||
* [`design`](spec/design/): the design-level specification of seL4,
|
||||
generated from the Haskell model.
|
||||
* [`c`](spec/cspec/c/): the C code of the seL4 kernel, preprocessed into a form that
|
||||
can be read into Isabelle. This is generated from the [seL4 repository](https://github.com/seL4/seL4).
|
||||
### Branch "isabelle2023"
|
||||
|
||||
* [`proof`](proof/): the seL4 proofs
|
||||
* [`invariant-abstract`](proof/invariant-abstract/): invariants of the seL4 abstract specification
|
||||
* [`refine`](proof/refine/): refinement between abstract and design specifications
|
||||
* [`crefine`](proof/crefine/): refinement between design specification and C semantics
|
||||
* [`access-control`](proof/access-control/): integrity and authority confinement proofs
|
||||
* [`infoflow`](proof/infoflow/): confidentiality and intransitive non-interference proofs
|
||||
* [`asmrefine`](proof/asmrefine/): Isabelle/HOL part of the seL4 binary verification
|
||||
* [`drefine`](proof/drefine/): refinement between capDL and abstract specification
|
||||
* [`sep-capDL`](proof/sep-capDL/): a separation logic instance on capDL
|
||||
* [`capDL-api`](proof/capDL-api/): separation logic specifications of selected seL4 APIs
|
||||
|
||||
* [`lib`](lib/): generic proof libraries, proof methods and tools. Among these,
|
||||
further libraries for fixed-size machine words, a formalisation of state
|
||||
monads with nondeterminism and exceptions, a generic verification condition
|
||||
generator for monads, a recursive invariant prover for these (`crunch`), an
|
||||
abstract separation logic formalisation, a prototype of the [Eisbach][6] proof
|
||||
method language, a prototype `levity` refactoring tool, and others.
|
||||
|
||||
* [`tools`](tools/): larger, self-contained proof tools
|
||||
* [`asmrefine`](tools/asmrefine/): the generic Isabelle/HOL part of the binary
|
||||
verification tool
|
||||
* [`c-parser`](tools/c-parser/): a parser from C into the Simpl language in Isabelle/HOL.
|
||||
Includes a C memory model.
|
||||
* [`autocorres`](tools/autocorres/): an automated, proof-producing abstraction tool from
|
||||
C into higher-level Isabelle/HOL functions, based on the C parser above
|
||||
* [`haskell-translator`](tools/haskell-translator/): a basic python script for converting the Haskell
|
||||
prototype of seL4 into the executable design specification in
|
||||
Isabelle/HOL.
|
||||
|
||||
* [`misc`](misc/): miscellaneous scripts and build tools
|
||||
|
||||
* [`camkes`](camkes/): an initial formalisation of the CAmkES component platform
|
||||
on seL4. Work in progress.
|
||||
|
||||
* [`sys-init`](sys-init/): specification of a capDL-based, user-level system initialiser
|
||||
for seL4, with proof that the specification leads to correctly initialised
|
||||
systems.
|
||||
|
||||
|
||||
[6]: https://trustworthy.systems/publications/nictaabstracts/Matichuk_WM_14.abstract "An Isabelle Proof Method Language"
|
||||
|
||||
|
||||
Hardware requirements
|
||||
---------------------
|
||||
|
||||
Almost all proofs in this repository should work within 4GB of RAM. Proofs
|
||||
involving the C refinement, will usually need the 64bit mode of polyml and
|
||||
about 16GB of RAM.
|
||||
|
||||
The proofs distribute reasonably well over multiple cores, up to about 8
|
||||
cores are useful.
|
||||
|
||||
Running the Proofs
|
||||
------------------
|
||||
|
||||
If Isabelle is set up correctly, a full test for the proofs in this repository
|
||||
for seL4 on the `ARM` architecture can be run with the command
|
||||
|
||||
L4V_ARCH=ARM ./run_tests
|
||||
|
||||
from the directory `l4v/`.
|
||||
|
||||
Set the environment variable `L4V_ARCH` to one of `ARM`, `ARM_HYP`, `X64`,
|
||||
`RISCV64`, or `AARCH64` to get the proofs for the respective architecture. `ARM`
|
||||
has the most complete set of proofs, the other architectures tend to support
|
||||
only a subset of the proof sessions defined for `ARM`.
|
||||
|
||||
Not all of the proof sessions can be built directly with the `isabelle build`
|
||||
command. The seL4 proofs depend on Isabelle specifications that are generated
|
||||
from the C source code and Haskell model. Therefore, it is recommended to always
|
||||
build using the `run_tests` command or the supplied Makefiles, which will ensure
|
||||
that these generated specs are up to date.
|
||||
|
||||
To do this, enter one level under the `l4v/` directory and run `make <session-name>`.
|
||||
For example, to build the abstract specification, do
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
cd l4v/spec
|
||||
make ASpec
|
||||
|
||||
See the `HEAPS` variable in the corresponding `Makefile` for available targets.
|
||||
The sessions that directly depend on generated sources are `ASpec`, `ExecSpec`,
|
||||
and `CKernel`. These, and all sessions that depend on them, need to be run using
|
||||
`run_tests` or `make`.
|
||||
|
||||
Proof sessions that do not depend on generated inputs can be built directly with
|
||||
|
||||
./isabelle/bin/isabelle build -d . -v -b <session name>
|
||||
|
||||
from the directory `l4v/`. For available sessions and their dependencies, see
|
||||
the corresponding `ROOT` files in this repository. There is roughly one session
|
||||
corresponding to each major directory in the repository.
|
||||
|
||||
For interactively exploring, say the invariant proof of the abstract
|
||||
specification on `ARM`, note that in `proof/ROOT` the parent session for
|
||||
`AInvs` is `ASpec` and therefore run:
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
./run_tests ASpec
|
||||
./isabelle/bin/isabelle jedit -d . -R AInvs
|
||||
|
||||
or, if you prefer `make`:
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
cd spec; make ASpec
|
||||
../isabelle/bin/isabelle jedit -d . -R AInvs
|
||||
|
||||
in `l4v/` and open one of the files in `proof/invariant-abstract`.
|
||||
This branch provides a port of AutoCorres (and AutoCorres only) to Isabelle 2023. This
|
||||
branch is now obsolete, as the upstream repository supports Isabelle 2023.
|
||||
|
|
|
@ -0,0 +1,189 @@
|
|||
<!--
|
||||
Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
|
||||
SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
-->
|
||||
|
||||
[![DOI][0]](http://dx.doi.org/10.5281/zenodo.591732)
|
||||
[![CI](https://github.com/seL4/l4v/actions/workflows/push.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/push.yml)
|
||||
[![Proofs](https://github.com/seL4/l4v/actions/workflows/proof-deploy.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/proof-deploy.yml)
|
||||
[![Weekly Clean](https://github.com/seL4/l4v/actions/workflows/weekly-clean.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/weekly-clean.yml)
|
||||
[![External](https://github.com/seL4/l4v/actions/workflows/external.yml/badge.svg)](https://github.com/seL4/l4v/actions/workflows/external.yml)
|
||||
|
||||
MCS:\
|
||||
[![CI](https://github.com/seL4/l4v/actions/workflows/push.yml/badge.svg?branch=rt)](https://github.com/seL4/l4v/actions/workflows/push.yml)
|
||||
[![RT Proofs](https://github.com/seL4/l4v/actions/workflows/proof.yml/badge.svg?branch=rt)](https://github.com/seL4/l4v/actions/workflows/proof.yml)
|
||||
|
||||
[0]: https://zenodo.org/badge/doi/10.5281/zenodo.591732.svg
|
||||
|
||||
|
||||
[The L4.verified Proofs][1]
|
||||
===========================
|
||||
|
||||
This is the L4.verified git repository with formal specifications and
|
||||
proofs for the seL4 microkernel.
|
||||
|
||||
Most proofs in this repository are conducted in the interactive proof
|
||||
assistant [Isabelle/HOL][2]. For an introduction to Isabelle, see its
|
||||
[official website][2] and [documentation][3].
|
||||
|
||||
[1]: https://github.com/seL4/l4v "L4.verified Repository"
|
||||
[2]: https://isabelle.in.tum.de "Isabelle Website"
|
||||
[3]: https://isabelle.in.tum.de/documentation.html "Isabelle Documentation"
|
||||
|
||||
<a name="setup"></a>
|
||||
Setup
|
||||
-----
|
||||
|
||||
This repository is meant to be used as part of a Google [repo][5] setup. Instead
|
||||
of cloning it directly, please follow the directions for software dependencies
|
||||
and Isabelle installation in the [setup.md](docs/setup.md) file in the `docs`
|
||||
directory.
|
||||
|
||||
[5]: https://gerrit.googlesource.com/git-repo/+/HEAD/README.md
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
Contributions to this repository are welcome.
|
||||
Please read [`CONTRIBUTING.md`](CONTRIBUTING.md) for details.
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
The repository is organised as follows.
|
||||
|
||||
* [`docs`](docs/): documentation on conventions, style, etc.
|
||||
|
||||
* [`spec`](spec/): a number of different formal specifications of seL4
|
||||
* [`abstract`](spec/abstract/): the functional abstract specification of seL4
|
||||
* [`sep-abstract`](spec/sep-abstract/): an abstract specification for a reduced
|
||||
version of seL4 that is configured as a separation kernel
|
||||
* [`haskell`](spec/haskell/): Haskell model of the seL4 kernel, kept in sync
|
||||
with the C code
|
||||
* [`machine`](spec/machine/): the machine interface of these two specifications
|
||||
* [`cspec`](spec/cspec/): the entry point for automatically translating the seL4 C code
|
||||
into Isabelle
|
||||
* [`capDL`](spec/capDL/): a specification of seL4 that abstracts from memory content and
|
||||
concrete execution behaviour, modelling the protection state of the
|
||||
system in terms of capabilities. This specification corresponds to the
|
||||
capability distribution language *capDL* that can be used to initialise
|
||||
user-level systems on top of seL4.
|
||||
* [`take-grant`](spec/take-grant/): a formalisation of the classical take-grant security
|
||||
model, applied to seL4, but not connected to the code of seL4.
|
||||
|
||||
* There are additional specifications that are not tracked in this repository,
|
||||
but are generated from other files:
|
||||
* [`design`](spec/design/): the design-level specification of seL4,
|
||||
generated from the Haskell model.
|
||||
* [`c`](spec/cspec/c/): the C code of the seL4 kernel, preprocessed into a form that
|
||||
can be read into Isabelle. This is generated from the [seL4 repository](https://github.com/seL4/seL4).
|
||||
|
||||
* [`proof`](proof/): the seL4 proofs
|
||||
* [`invariant-abstract`](proof/invariant-abstract/): invariants of the seL4 abstract specification
|
||||
* [`refine`](proof/refine/): refinement between abstract and design specifications
|
||||
* [`crefine`](proof/crefine/): refinement between design specification and C semantics
|
||||
* [`access-control`](proof/access-control/): integrity and authority confinement proofs
|
||||
* [`infoflow`](proof/infoflow/): confidentiality and intransitive non-interference proofs
|
||||
* [`asmrefine`](proof/asmrefine/): Isabelle/HOL part of the seL4 binary verification
|
||||
* [`drefine`](proof/drefine/): refinement between capDL and abstract specification
|
||||
* [`sep-capDL`](proof/sep-capDL/): a separation logic instance on capDL
|
||||
* [`capDL-api`](proof/capDL-api/): separation logic specifications of selected seL4 APIs
|
||||
|
||||
* [`lib`](lib/): generic proof libraries, proof methods and tools. Among these,
|
||||
further libraries for fixed-size machine words, a formalisation of state
|
||||
monads with nondeterminism and exceptions, a generic verification condition
|
||||
generator for monads, a recursive invariant prover for these (`crunch`), an
|
||||
abstract separation logic formalisation, a prototype of the [Eisbach][6] proof
|
||||
method language, a prototype `levity` refactoring tool, and others.
|
||||
|
||||
* [`tools`](tools/): larger, self-contained proof tools
|
||||
* [`asmrefine`](tools/asmrefine/): the generic Isabelle/HOL part of the binary
|
||||
verification tool
|
||||
* [`c-parser`](tools/c-parser/): a parser from C into the Simpl language in Isabelle/HOL.
|
||||
Includes a C memory model.
|
||||
* [`autocorres`](tools/autocorres/): an automated, proof-producing abstraction tool from
|
||||
C into higher-level Isabelle/HOL functions, based on the C parser above
|
||||
* [`haskell-translator`](tools/haskell-translator/): a basic python script for converting the Haskell
|
||||
prototype of seL4 into the executable design specification in
|
||||
Isabelle/HOL.
|
||||
|
||||
* [`misc`](misc/): miscellaneous scripts and build tools
|
||||
|
||||
* [`camkes`](camkes/): an initial formalisation of the CAmkES component platform
|
||||
on seL4. Work in progress.
|
||||
|
||||
* [`sys-init`](sys-init/): specification of a capDL-based, user-level system initialiser
|
||||
for seL4, with proof that the specification leads to correctly initialised
|
||||
systems.
|
||||
|
||||
|
||||
[6]: https://trustworthy.systems/publications/nictaabstracts/Matichuk_WM_14.abstract "An Isabelle Proof Method Language"
|
||||
|
||||
|
||||
Hardware requirements
|
||||
---------------------
|
||||
|
||||
Almost all proofs in this repository should work within 4GB of RAM. Proofs
|
||||
involving the C refinement, will usually need the 64bit mode of polyml and
|
||||
about 16GB of RAM.
|
||||
|
||||
The proofs distribute reasonably well over multiple cores, up to about 8
|
||||
cores are useful.
|
||||
|
||||
Running the Proofs
|
||||
------------------
|
||||
|
||||
If Isabelle is set up correctly, a full test for the proofs in this repository
|
||||
for seL4 on the `ARM` architecture can be run with the command
|
||||
|
||||
L4V_ARCH=ARM ./run_tests
|
||||
|
||||
from the directory `l4v/`.
|
||||
|
||||
Set the environment variable `L4V_ARCH` to one of `ARM`, `ARM_HYP`, `X64`,
|
||||
`RISCV64`, or `AARCH64` to get the proofs for the respective architecture. `ARM`
|
||||
has the most complete set of proofs, the other architectures tend to support
|
||||
only a subset of the proof sessions defined for `ARM`.
|
||||
|
||||
Not all of the proof sessions can be built directly with the `isabelle build`
|
||||
command. The seL4 proofs depend on Isabelle specifications that are generated
|
||||
from the C source code and Haskell model. Therefore, it is recommended to always
|
||||
build using the `run_tests` command or the supplied Makefiles, which will ensure
|
||||
that these generated specs are up to date.
|
||||
|
||||
To do this, enter one level under the `l4v/` directory and run `make <session-name>`.
|
||||
For example, to build the abstract specification, do
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
cd l4v/spec
|
||||
make ASpec
|
||||
|
||||
See the `HEAPS` variable in the corresponding `Makefile` for available targets.
|
||||
The sessions that directly depend on generated sources are `ASpec`, `ExecSpec`,
|
||||
and `CKernel`. These, and all sessions that depend on them, need to be run using
|
||||
`run_tests` or `make`.
|
||||
|
||||
Proof sessions that do not depend on generated inputs can be built directly with
|
||||
|
||||
./isabelle/bin/isabelle build -d . -v -b <session name>
|
||||
|
||||
from the directory `l4v/`. For available sessions and their dependencies, see
|
||||
the corresponding `ROOT` files in this repository. There is roughly one session
|
||||
corresponding to each major directory in the repository.
|
||||
|
||||
For interactively exploring, say the invariant proof of the abstract
|
||||
specification on `ARM`, note that in `proof/ROOT` the parent session for
|
||||
`AInvs` is `ASpec` and therefore run:
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
./run_tests ASpec
|
||||
./isabelle/bin/isabelle jedit -d . -R AInvs
|
||||
|
||||
or, if you prefer `make`:
|
||||
|
||||
export L4V_ARCH=ARM
|
||||
cd spec; make ASpec
|
||||
../isabelle/bin/isabelle jedit -d . -R AInvs
|
||||
|
||||
in `l4v/` and open one of the files in `proof/invariant-abstract`.
|
2
ROOTS
2
ROOTS
|
@ -11,7 +11,7 @@ lib/Basics
|
|||
lib/Eisbach_Tools
|
||||
lib/ML_Utils
|
||||
lib/Monads
|
||||
lib/Word_Lib
|
||||
lib/Word_Lib_l4v
|
||||
lib/sep_algebra
|
||||
lib/EVTutorial
|
||||
docs
|
||||
|
|
|
@ -0,0 +1,392 @@
|
|||
<!--
|
||||
Copyright 2023, Proofcraft Pty Ltd
|
||||
|
||||
SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
-->
|
||||
|
||||
# Commit Messages
|
||||
|
||||
## Objective
|
||||
|
||||
The l4v repository is relatively large, active, and long-lived. It has a public
|
||||
history of about one decade, and an additional decade of unreleased private
|
||||
history. It will hopefully live on for another 20 years.
|
||||
|
||||
This means that the commit history is important. Examples of questions the commit
|
||||
history should be able to answer reasonably quickly and painlessly are:
|
||||
|
||||
- > Is this written in a strange way for good reasons, or were we just in a hurry?
|
||||
> What was the reason? Does it still apply?
|
||||
|
||||
- > When did we change doing things this way and why?
|
||||
|
||||
- > Has this always been broken or was there a seemingly unrelated change that broke it?
|
||||
|
||||
- > How long did it take to finish this proof?
|
||||
|
||||
- > How much change was necessary to do this proof?
|
||||
|
||||
- > Where did this library lemma come from?
|
||||
|
||||
## General
|
||||
|
||||
The [seL4 repository guidelines][git-conventions] apply to the `l4v` repository,
|
||||
with the following exceptions and additions:
|
||||
|
||||
- header can be max 78 characters
|
||||
- body is wrapped at 78 characters
|
||||
- we use tags in the header to indicate which part of the repository
|
||||
the commit applies to
|
||||
|
||||
We are using 78 for the header because of the tags, which take up some space. If
|
||||
you can manage to stay within 50 characters anyway, that is appreciated, but it's
|
||||
not always practical. Using a body wrap of 72 is also allowed, since that is the
|
||||
default for other seL4 repositories.
|
||||
|
||||
We use tags, because the repository is relatively large and most commits only
|
||||
apply to small parts of it. The tags make it easy to identify relevance of a
|
||||
commit at a glance while scanning through a large number of commits.
|
||||
|
||||
The general guidelines prescribe capitalising the commit header. We do not
|
||||
capitalise the tag or after the tag, but we do capitalise the (rare) cases where
|
||||
there is no tag in the commit header.
|
||||
|
||||
## Header and Body
|
||||
|
||||
There is good general advice on [commit message writing][commit-messages]
|
||||
available on the internet and it is as relevant to proofs as it is to code.
|
||||
You should read it, it's not long and it's good advice.
|
||||
|
||||
Repeating some high-level points:
|
||||
|
||||
- Use imperative voice (e.g. `proof: rename lemma foo to bar`)
|
||||
- The header should be a short summary, focusing on "what"
|
||||
- The body should explain what is going on in more detail (also in imperative
|
||||
voice), but much more importantly *why* it is going on (is `bar` more
|
||||
consistent than `foo`? Is the name `foo` needed for something else? Does `bar`
|
||||
just better describe what is going on?).
|
||||
- You are trying to explain things to your future self or a future colleague
|
||||
5-10 years from now. You can assume knowledge of the repository in general,
|
||||
but you should not assume specific context that is obvious to you right now,
|
||||
but that will not be known to a different person 5 years from now.
|
||||
|
||||
## Tags
|
||||
|
||||
- We use tags to indicate which part of the repository the commit applies to,
|
||||
and if it is architecture-specific, then to which architecture it applies to.
|
||||
|
||||
- We do not usually use branch tags, because git branches are ephemeral and we
|
||||
are using rebase branches for most of our work. The one exception is the `rt`
|
||||
branch, which has been alive for over half a decade. For this branch, we allow
|
||||
merge commits (from branch `master` into `rt` only), and we want to be able to
|
||||
reconstruct a rebase history from that at the end of the branch's life.
|
||||
|
||||
This means, we do use the tag `rt` for commits that only make sense on this
|
||||
branch. If you could apply the commit to the master branch directly (e.g. you're
|
||||
adding a lemma to a library), it should not get the tag. Otherwise it should.
|
||||
|
||||
### Tag Examples
|
||||
|
||||
The main tags we use are mostly the directory names of the main proof something
|
||||
is in, e.g. `refine`, `crefine`, `sys-init`, `camkes`. For some of these, there
|
||||
are abbreviations, mainly `aspec` for the abstract spec and `ainvs` for the
|
||||
abstract invariants.
|
||||
|
||||
For large directories that have logical sub parts, we use slashes, e.g.
|
||||
`lib/monads`. Not so much because the change is in that directory, but because
|
||||
we want to see that it's a library change and applies to the monad part of the
|
||||
library.
|
||||
|
||||
If the change applies to many proofs, for instance large lemma renaming commits,
|
||||
we use tags such as `proof` and `spec`.
|
||||
|
||||
We combine tags with `+` if a change applies to multiple parts, e.g.
|
||||
`clib+crefine` or `lib+proof`.
|
||||
|
||||
If something is specific to an architecture we preface the tag with the
|
||||
architecture, e.g. `arm refine:` or `aarch64 aspec+ainvs:`. The current
|
||||
architecture tags are: `arm`, `arm-hyp`, `x64`, `riscv`, `aarch64`.
|
||||
Please use these spellings only.
|
||||
|
||||
More tag examples:
|
||||
|
||||
- `trivial`: for small trivial changes like fixing a typo, where no proofs or
|
||||
specs have changed, i.e. that would not need a proof CI run.
|
||||
- `cleanup:` for cleanups that do not change proof content
|
||||
- `github:` for GitHub CI changes
|
||||
- `run_tests`: for changes to the top-level `run_tests` file
|
||||
- `isabelle20xx`: for easily identifying commits related to Isabelle updates
|
||||
|
||||
For more consistency there is an order between tags. More abstract/general
|
||||
things should come first, e.g. `lib` < `aspec` < `haskell` < `ainvs` < `refine`
|
||||
< `orphanage` < `crefine`. Or `dspec` < `drefine` and `access` < `infoflow`. Specs
|
||||
< proofs and `infoflow` < refinement proofs. This is not a total order, it's Ok
|
||||
to use your own judgement.
|
||||
|
||||
Because `lib` has many subdirectories and separate parts, it's fine to use
|
||||
session names as tags there to shorten things a bit, e.g. `clib`, `monads`,
|
||||
`word_lib` instead of `lib/clib`, `lib/monads`, or `lib/word_lib`. This makes
|
||||
sense when the tags are session names.
|
||||
|
||||
See also the longer example list of [good tags](#good-tags) below.
|
||||
|
||||
## Tips and Tools
|
||||
|
||||
### Looking at history
|
||||
|
||||
The main tools to interact with the git history are browsing it on GitHub and
|
||||
various forms of `git log`:
|
||||
|
||||
```sh
|
||||
git log --oneline # show only headings
|
||||
git log # show commit info with author, date, message
|
||||
git log --stat # additionally show which files have changed
|
||||
git log -p # additionally show full diff
|
||||
```
|
||||
|
||||
For all of these, you can supply a path to restrict the log to a certain file
|
||||
or directory in the repo. You can also supply a branch, or a commit range like
|
||||
`master..rt` to restrict the output.
|
||||
|
||||
Especially `git log --oneline` is useful for quickly getting an overview. Example
|
||||
output:
|
||||
|
||||
```
|
||||
b3c6df48a clib: improve ccorres_While
|
||||
49ff8457f clib+crefine: improve and consolidate variants of ccorres_to_vcg
|
||||
8c433c085 clib: add some rules for hoarep
|
||||
82b954817 clib: improve ccorres_call_getter_setter
|
||||
8c59df449 lib/monads: remove more uses of _tac methods
|
||||
563232397 run_tests+proof: exclude SimplExportAndRefine for AARCH64
|
||||
1cce5b3ff proof: switch AArch64 quick_and_dirty from Refine to CRefine
|
||||
402a8342d run_tests: enable CBaseRefine for AARCH64
|
||||
32a672412 aarch64 cspec: add Kernel_C.thy to base CKernel image on
|
||||
b2cd1ce4a aarch64 asmrefine: copy ArchSetup from RISCV64
|
||||
67c0109b7 lib/monads: avoid clarsimp as initial Isar method
|
||||
bd5026438 lib/monads: fix remaining document preparation issues
|
||||
4d0086567 lib/monads: add new Trace_* files to ROOT
|
||||
598e19dd6 lib/monads: coherent document structure
|
||||
4cbfb4ab0 lib/monads: minor style + warning cleanup
|
||||
b2dd5db6d lib/monads: fix document preparation issues
|
||||
03a045309 lib/monads: add AFP document setup
|
||||
d0bab9c79 misc/scripts: remove Darwin cpp wrapper
|
||||
```
|
||||
|
||||
You can very quickly see that C verification has been active recently, that
|
||||
new tests were added, that AARCH64 refinement proofs have been done, and that there was
|
||||
some work to do with the AFP and the monad library. You can see that nothing
|
||||
has happened with the system initialiser or other user-level proofs, and that there
|
||||
are no changes that should affect, for instance, the C parser.
|
||||
|
||||
You only see such things quickly when the messages are consistent and follow the
|
||||
same kind of pattern. It's not so important what the pattern is. It is important
|
||||
that it is consistent.
|
||||
|
||||
Note in e.g. `proof: switch AArch64 quick_and_dirty from Refine to CRefine` that
|
||||
the architecture tag is used only when the change is specific to files for that
|
||||
architecture. In this commit, the overall ROOTS file is changed, so it shouldn't
|
||||
get the architecture tag.
|
||||
|
||||
### What tag should I pick?
|
||||
|
||||
If you're uncertain what tag to pick for a certain file or directory, the
|
||||
easiest way to figure it out is to do
|
||||
|
||||
```sh
|
||||
git log --oneline <that-file>
|
||||
```
|
||||
|
||||
Do your tags the same way people have done before. This will make the pattern
|
||||
consistent and should be reasonably easy to read even if it's not perfect. Look
|
||||
at a few commits, not only a single one, so you can course correct instead of
|
||||
amplify if someone happened to invent a new flavour.
|
||||
|
||||
You can even do that when you're in the middle of writing a commit message, it's
|
||||
safe to interrupt `git commit` with `Ctrl-Z`, then `bg` in your shell to put
|
||||
it into the background, and then `git log --oneline <file>` to see the history.
|
||||
|
||||
Any operation that doesn't change the state of the repository is fine (even
|
||||
those that do are fine, but then the commit will probably fail).
|
||||
|
||||
When you're looking into history for tags, use mainly commits from roughly 2018
|
||||
onwards. We weren't very consistent with tags before that. The more recent the
|
||||
more consistent.
|
||||
|
||||
### Good tags
|
||||
|
||||
Here's a list of tags that have been used in the past and that follow the
|
||||
guidelines above.
|
||||
|
||||
```
|
||||
aarch64 ainvs
|
||||
aarch64 ainvs+refine
|
||||
aarch64 asmrefine
|
||||
aarch64 aspec
|
||||
aarch64 aspec+ainvs
|
||||
aarch64 aspec+design
|
||||
aarch64 aspec+haskell
|
||||
aarch64 aspec+machine
|
||||
aarch64 cspec
|
||||
aarch64 design
|
||||
aarch64 design+machine
|
||||
aarch64 haskell
|
||||
aarch64 haskell+design
|
||||
aarch64 haskell+machine
|
||||
aarch64 machine+ainvs
|
||||
aarch64 proof
|
||||
aarch64 refine
|
||||
aarch64 spec+haskell
|
||||
access+infoflow+drefine
|
||||
access+infoflow+crefine+drefine
|
||||
ainvs
|
||||
ainvs+crefine
|
||||
ainvs+refine
|
||||
arm aspec+design
|
||||
arm access
|
||||
arm access+infoflow
|
||||
arm ainvs
|
||||
arm aspec
|
||||
arm crefine
|
||||
arm haskell
|
||||
arm infoflow
|
||||
arm refine
|
||||
arm+arm-hyp crefine
|
||||
arm+arm-hyp machine
|
||||
arm-hyp aspec
|
||||
arm-hyp aspec+design
|
||||
arm-hyp ainvs
|
||||
arm-hyp crefine
|
||||
arm-hyp design
|
||||
arm-hyp haskell
|
||||
arm-hyp haskell+refine
|
||||
arm-hyp machine
|
||||
arm-hyp refine
|
||||
asmrefine
|
||||
aspec
|
||||
aspec+access
|
||||
aspec+ainvs
|
||||
aspec+design+haskell
|
||||
aspec+haskell
|
||||
autocorres
|
||||
bisim
|
||||
c-parser
|
||||
c-parser+autocorres
|
||||
c-parser+crefine
|
||||
camkes
|
||||
capDL
|
||||
ckernel
|
||||
cleanup
|
||||
cleanup ainvs
|
||||
clib
|
||||
clib+crefine
|
||||
crefine
|
||||
crefine+capDL
|
||||
cspec
|
||||
design
|
||||
docs
|
||||
dpolicy
|
||||
drefine
|
||||
dspec
|
||||
dspec+drefine+infoflow
|
||||
github
|
||||
haskell
|
||||
haskell+design
|
||||
haskell-translator
|
||||
infoflow
|
||||
infoflow+crefine
|
||||
infoflow+dpolicy+cdl-refine
|
||||
isabelle-2021
|
||||
isabelle-2021 access
|
||||
isabelle-2021 c-parser
|
||||
lib
|
||||
lib+READMEs
|
||||
lib+aarch64 ainvs
|
||||
lib+aarch64 refine
|
||||
lib+ainvs
|
||||
lib+ainvs+aarch64 ainvs
|
||||
lib+ainvs+aarch64 refine
|
||||
lib+ainvs+access+refine
|
||||
lib+autocorres
|
||||
lib+c-parser
|
||||
lib+crefine
|
||||
lib+proof
|
||||
lib+proof+autocorres
|
||||
lib+proof+tools
|
||||
lib+proof
|
||||
lib+refine+crefine
|
||||
lib+spec
|
||||
lib+spec+proof+autocorres
|
||||
lib+spec+proof
|
||||
lib+sysinit
|
||||
lib+tools
|
||||
lib/apply_debug
|
||||
lib/clib
|
||||
lib/corres_method
|
||||
lib/crunch
|
||||
lib/monads
|
||||
lib/sep_algebra
|
||||
license
|
||||
misc
|
||||
misc/regression
|
||||
misc/scripts
|
||||
misc/stats
|
||||
proof
|
||||
proof+autocorres
|
||||
proof/Makefile
|
||||
proof/ROOT
|
||||
refine
|
||||
refine cleanup
|
||||
refine+crefine
|
||||
refine+orphanage
|
||||
riscv
|
||||
riscv access
|
||||
riscv ainvs
|
||||
riscv ainvs+access
|
||||
riscv aspec
|
||||
riscv aspec+ainvs
|
||||
riscv aspec+haskell
|
||||
riscv crefine
|
||||
riscv cspec+crefine
|
||||
riscv design
|
||||
riscv haskell
|
||||
riscv haskell+design
|
||||
riscv haskell+proof
|
||||
riscv haskell+refine
|
||||
riscv infoflow
|
||||
riscv machine
|
||||
riscv machine+ainvs
|
||||
riscv machine+design+crefine
|
||||
riscv orphanage
|
||||
riscv platform
|
||||
riscv refine
|
||||
riscv access+infoflow+refine+crefine
|
||||
riscv spec
|
||||
riscv+aarch64 ainvs+refine
|
||||
riscv+x64 crefine
|
||||
riscv64+aarch64 ainvs
|
||||
run_tests
|
||||
run_tests+proof
|
||||
spec+proof
|
||||
style
|
||||
sys-init
|
||||
tools
|
||||
tools/asmrefine
|
||||
trivial
|
||||
word_lib
|
||||
word_lib+crefine
|
||||
x64 ainvs+refine+crefine
|
||||
x64 aspec
|
||||
x64 crefine
|
||||
x64 design
|
||||
x64 design/skel
|
||||
x64 haskell
|
||||
x64 machine
|
||||
x64 refine
|
||||
x64 refine+crefine
|
||||
```
|
||||
|
||||
Most of these could be prefixed with `rt` if they only made sense on the `rt`
|
||||
branch, e.g. `rt arm ainvs+refine:`
|
||||
|
||||
[git-conventions]: https://docs.sel4.systems/processes/git-conventions.html
|
||||
[commit-messages]: https://chris.beams.io/posts/git-commit/
|
|
@ -10,7 +10,7 @@ This command is for searching for theorems. If you are looking for a
|
|||
constant/function instead, look at [find_consts](find-consts.md).
|
||||
|
||||
There is an introduction to the `find_theorems` command in the
|
||||
[Isabelle/HOL tutorial](http://isabelle.in.tum.de/documentation.html).
|
||||
[Isabelle/HOL tutorial](https://isabelle.in.tum.de/documentation.html).
|
||||
Here we cover some additional material and useful patterns.
|
||||
|
||||
`find_theorems` can be written in the theory as a diagnostic command, or
|
||||
|
|
|
@ -243,4 +243,4 @@ proof-context aware than the 'original' indenter. Pressing `ctrl+i` while some
|
|||
subgoal depth and maintaining the relative indentation of multi-line `apply`
|
||||
statements.
|
||||
|
||||
[isabelle]: http://isabelle.in.tum.de
|
||||
[isabelle]: https://isabelle.in.tum.de
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
*)
|
||||
|
||||
theory CLib
|
||||
imports Word_Lib.Many_More
|
||||
imports Word_Lib_l4v.Many_More
|
||||
begin
|
||||
|
||||
lemma nat_diff_less:
|
||||
|
|
|
@ -9,7 +9,7 @@ This session contains basic library theories that are needed in other sessions
|
|||
of this repository, such as [Monads] or [CParser], but that we do not want to
|
||||
put into these sessions to avoid circular session dependencies.
|
||||
|
||||
Dependencies on `Word_Lib` and the Isabelle distribution (e.g. `HOL-Library`) are
|
||||
Dependencies on `Word_Lib_l4v` and the Isabelle distribution (e.g. `HOL-Library`) are
|
||||
fine, but avoid introducing any further session dependencies.
|
||||
|
||||
[Monads]: ../Monads/
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
chapter Lib
|
||||
|
||||
session Basics (lib) = Word_Lib +
|
||||
session Basics (lib) = Word_Lib_l4v +
|
||||
|
||||
theories
|
||||
CLib
|
|
@ -481,6 +481,18 @@ lemma corres_if3:
|
|||
(if G then a else b) (if G' then c else d)"
|
||||
by simp
|
||||
|
||||
lemma corres_if_strong:
|
||||
"\<lbrakk>\<And>s s'. \<lbrakk>(s, s') \<in> sr; R s; R' s'\<rbrakk> \<Longrightarrow> G = G';
|
||||
\<lbrakk>G; G'\<rbrakk> \<Longrightarrow> corres_underlying sr nf nf' r P P' a c;
|
||||
\<lbrakk>\<not> G; \<not> G'\<rbrakk> \<Longrightarrow> corres_underlying sr nf nf' r Q Q' b d \<rbrakk>
|
||||
\<Longrightarrow> corres_underlying sr nf nf' r
|
||||
(R and (if G then P else Q)) (R' and (if G' then P' else Q'))
|
||||
(if G then a else b) (if G' then c else d)"
|
||||
by (fastforce simp: corres_underlying_def)
|
||||
|
||||
lemmas corres_if_strong' =
|
||||
corres_if_strong[where R=R and P=R and Q=R for R,
|
||||
where R'=R' and P'=R' and Q'=R' for R', simplified]
|
||||
|
||||
text \<open>Some equivalences about liftM and other useful simps\<close>
|
||||
|
||||
|
@ -740,6 +752,11 @@ lemma corres_assert_assume:
|
|||
by (auto simp: bind_def assert_def fail_def return_def
|
||||
corres_underlying_def)
|
||||
|
||||
lemma corres_assert_assume_l:
|
||||
"corres_underlying sr nf nf' rrel P Q (f ()) g
|
||||
\<Longrightarrow> corres_underlying sr nf nf' rrel (P and (\<lambda>s. P')) Q (assert P' >>= f) g"
|
||||
by (force simp: corres_underlying_def assert_def return_def bind_def fail_def)
|
||||
|
||||
lemma corres_assert_gen_asm_cross:
|
||||
"\<lbrakk> \<And>s s'. \<lbrakk>(s, s') \<in> sr; P' s; Q' s'\<rbrakk> \<Longrightarrow> A;
|
||||
A \<Longrightarrow> corres_underlying sr nf nf' r P Q f (g ()) \<rbrakk>
|
||||
|
|
|
@ -17,6 +17,17 @@ imports
|
|||
Local_Method
|
||||
begin
|
||||
|
||||
section \<open>Warnings\<close>
|
||||
|
||||
method_setup not_visible =
|
||||
\<open>Method.text_closure >> (fn m => fn ctxt => fn facts =>
|
||||
let
|
||||
val ctxt' = Context_Position.set_visible false ctxt
|
||||
fun tac st' = method_evaluate m ctxt' facts st'
|
||||
in SIMPLE_METHOD tac facts end)\<close>
|
||||
\<open>set context visibility to false for suppressing warnings in method execution\<close>
|
||||
|
||||
|
||||
section \<open>Debugging methods\<close>
|
||||
|
||||
method print_concl = (match conclusion in P for P \<Rightarrow> \<open>print_term P\<close>)
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
theory ExtraCorres
|
||||
imports Corres_UL
|
||||
imports Corres_UL DetWPLib
|
||||
begin
|
||||
|
||||
(* FIXME: the S in this rule is mainly to make the induction work, we don't actually need it in
|
||||
|
@ -181,12 +181,17 @@ qed
|
|||
|
||||
text \<open>Some results concerning the interaction of abstract and concrete states\<close>
|
||||
|
||||
definition ex_abs_underlying :: "('a \<times> 'b) set \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> 'b \<Rightarrow> bool" where
|
||||
"ex_abs_underlying sr P s' \<equiv> \<exists>s. (s,s') \<in> sr \<and> P s"
|
||||
|
||||
lemma ex_absI[intro!]:
|
||||
"(s, s') \<in> sr \<Longrightarrow> P s \<Longrightarrow> ex_abs_underlying sr P s'"
|
||||
by (auto simp add: ex_abs_underlying_def)
|
||||
|
||||
lemma corres_u_nofail:
|
||||
"corres_underlying S nf True r P P' f g \<Longrightarrow> (nf \<Longrightarrow> no_fail P f) \<Longrightarrow>
|
||||
no_fail (\<lambda>s'. \<exists>s. (s,s') \<in> S \<and> P s \<and> P' s') g"
|
||||
apply (clarsimp simp add: corres_underlying_def no_fail_def)
|
||||
apply fastforce
|
||||
done
|
||||
"\<lbrakk>corres_underlying S nf True r P P' f g; nf \<Longrightarrow> no_fail P f\<rbrakk>
|
||||
\<Longrightarrow> no_fail (P' and ex_abs_underlying S P) g"
|
||||
by (fastforce simp: corres_underlying_def ex_abs_underlying_def no_fail_def)
|
||||
|
||||
lemma wp_from_corres_u:
|
||||
"\<lbrakk> corres_underlying R nf nf' r G G' f f'; \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>; \<lbrace>P'\<rbrace> f' \<lbrace>Q'\<rbrace>; nf \<Longrightarrow> no_fail P f \<rbrakk> \<Longrightarrow>
|
||||
|
@ -204,7 +209,7 @@ lemma wp_from_corres_u_unit:
|
|||
lemma corres_nofail:
|
||||
"corres_underlying state_relation nf True r P P' f g \<Longrightarrow> (nf \<Longrightarrow> no_fail P f) \<Longrightarrow>
|
||||
no_fail (\<lambda>s'. \<exists>s. (s,s') \<in> state_relation \<and> P s \<and> P' s') g"
|
||||
by (rule corres_u_nofail)
|
||||
by (fastforce intro: no_fail_pre corres_u_nofail simp: ex_abs_underlying_def)
|
||||
|
||||
lemma wp_from_corres_unit:
|
||||
"\<lbrakk> corres_underlying state_relation nf nf' r G G' f f';
|
||||
|
@ -213,13 +218,6 @@ lemma wp_from_corres_unit:
|
|||
f' \<lbrace>\<lambda>_ s'. \<exists>s. (s,s') \<in> state_relation \<and> Q s \<and> Q' s'\<rbrace>"
|
||||
by (auto intro!: wp_from_corres_u_unit)
|
||||
|
||||
definition ex_abs_underlying :: "('a \<times> 'b) set \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> 'b \<Rightarrow> bool" where
|
||||
"ex_abs_underlying sr P s' \<equiv> \<exists>s. (s,s') \<in> sr \<and> P s"
|
||||
|
||||
lemma ex_absI[intro!]:
|
||||
"(s, s') \<in> sr \<Longrightarrow> P s \<Longrightarrow> ex_abs_underlying sr P s'"
|
||||
by (auto simp add: ex_abs_underlying_def)
|
||||
|
||||
lemma corres_underlying_split_ex_abs:
|
||||
assumes ac: "corres_underlying srel nf nf' r' G G' a c"
|
||||
assumes bd: "\<forall>rv rv'. r' rv rv' \<longrightarrow>
|
||||
|
@ -251,6 +249,100 @@ lemma hoare_from_abs_inv:
|
|||
using assms
|
||||
by (fastforce intro: hoare_from_abs[where R=\<top> and S=\<top> and R'=\<top> and Q="\<lambda>_. P" , simplified])
|
||||
|
||||
lemma corres_from_valid:
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying srel P) f'"
|
||||
assumes
|
||||
"\<And>s. \<lbrace>\<lambda>s'. P s \<and> P' s' \<and> (s, s') \<in> srel\<rbrace>
|
||||
f' \<lbrace>\<lambda>rv' t'. \<exists>(rv, t) \<in> fst (f s). (t, t') \<in> srel \<and> rrel rv rv'\<rbrace>"
|
||||
shows "corres_underlying srel nf nf' rrel P P' f f'"
|
||||
using assms
|
||||
by (fastforce simp: corres_underlying_def valid_def no_fail_def)
|
||||
|
||||
lemma corres_from_valid_det:
|
||||
assumes det: "det_wp P f"
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying srel P) f'"
|
||||
assumes valid:
|
||||
"\<And>s rv t.
|
||||
\<lbrakk>fst (f s) = {(rv, t)}; P s\<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>\<lambda>s'. P' s' \<and> (s, s') \<in> srel\<rbrace> f' \<lbrace>\<lambda>rv' t'. (t, t') \<in> srel \<and> rrel rv rv'\<rbrace>"
|
||||
shows "corres_underlying srel nf nf' rrel P P' f f'"
|
||||
apply (clarsimp simp: corres_underlying_def)
|
||||
apply (intro conjI)
|
||||
apply (insert det)
|
||||
apply (clarsimp simp: det_wp_def)
|
||||
apply (force dest: use_valid[OF _ valid])
|
||||
apply (fastforce dest: nf' simp: no_fail_def ex_abs_underlying_def)
|
||||
done
|
||||
|
||||
lemma corres_noop_ex_abs:
|
||||
assumes P: "\<And>s. P s \<Longrightarrow> \<lbrace>\<lambda>s'. (s, s') \<in> sr \<and> P' s'\<rbrace> f \<lbrace>\<lambda>rv s'. (s, s') \<in> sr \<and> r x rv\<rbrace>"
|
||||
assumes nf': "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying sr P) f"
|
||||
shows "corres_underlying sr nf nf' r P P' (return x) f"
|
||||
apply (simp add: corres_underlying_def return_def)
|
||||
apply clarsimp
|
||||
apply (frule P)
|
||||
apply (insert nf')
|
||||
apply (fastforce simp: valid_def no_fail_def ex_abs_underlying_def)
|
||||
done
|
||||
|
||||
lemma corres_symb_exec_r_conj_ex_abs:
|
||||
assumes z: "\<And>rv. corres_underlying sr nf nf' r Q (R' rv) x (y rv)"
|
||||
assumes y: "\<lbrace>Q'\<rbrace> m \<lbrace>R'\<rbrace>"
|
||||
assumes x: "\<And>s. Q s \<Longrightarrow> \<lbrace>\<lambda>s'. (s, s') \<in> sr \<and> P' s'\<rbrace> m \<lbrace>\<lambda>rv s'. (s, s') \<in> sr\<rbrace>"
|
||||
assumes nf: "nf' \<Longrightarrow> no_fail (P' and ex_abs_underlying sr Q) m"
|
||||
shows "corres_underlying sr nf nf' r Q (P' and Q') x (m >>= (\<lambda>rv. y rv))"
|
||||
proof -
|
||||
have P: "corres_underlying sr nf nf' dc Q P' (return undefined) m"
|
||||
apply (rule corres_noop_ex_abs)
|
||||
apply (simp add: x)
|
||||
apply (erule nf)
|
||||
done
|
||||
show ?thesis
|
||||
apply (rule corres_guard_imp)
|
||||
apply (subst return_bind[symmetric], rule corres_split[OF P])
|
||||
apply (rule z)
|
||||
apply wp
|
||||
apply (rule y)
|
||||
apply simp+
|
||||
done
|
||||
qed
|
||||
|
||||
lemmas corres_symb_exec_r_conj_ex_abs_forwards =
|
||||
corres_symb_exec_r_conj_ex_abs[where P'=P' and Q'=P' for P', simplified]
|
||||
|
||||
lemma gets_the_corres_ex_abs':
|
||||
"\<lbrakk>no_ofail P a; no_ofail (P' and ex_abs_underlying sr P) b\<rbrakk> \<Longrightarrow>
|
||||
corres_underlying sr False True r P P' (gets_the a) (gets_the b)
|
||||
= (\<forall>s s'. P s \<and> P' s' \<and> (s, s') \<in> sr \<longrightarrow> r (the (a s)) (the (b s')))"
|
||||
by (fastforce simp: gets_the_def no_ofail_def corres_underlying_def split_def exec_gets
|
||||
assert_opt_def fail_def return_def ex_abs_underlying_def)
|
||||
|
||||
lemmas gets_the_corres_ex_abs = gets_the_corres_ex_abs'[THEN iffD2]
|
||||
|
||||
lemma gets_the_corres':
|
||||
"\<lbrakk>no_ofail P a; no_ofail P' b\<rbrakk> \<Longrightarrow>
|
||||
corres_underlying sr False True r P P' (gets_the a) (gets_the b)
|
||||
= (\<forall>s s'. P s \<and> P' s' \<and> (s, s') \<in> sr \<longrightarrow> r (the (a s)) (the (b s')))"
|
||||
apply (erule gets_the_corres_ex_abs')
|
||||
apply (fastforce intro: no_ofail_pre_imp)
|
||||
done
|
||||
|
||||
lemmas gets_the_corres = gets_the_corres'[THEN iffD2]
|
||||
|
||||
lemma gets_the_corres_relation:
|
||||
"\<lbrakk>no_ofail P f; corres_underlying sr False True r P P' (gets_the f) (gets_the f');
|
||||
P s; P' s'; (s, s') \<in> sr\<rbrakk>
|
||||
\<Longrightarrow> r (the (f s)) (the (f' s'))"
|
||||
apply (rule_tac P=P and a=f and b=f' and P'=P'
|
||||
in gets_the_corres_ex_abs'[THEN iffD1, rule_format];
|
||||
fastforce?)
|
||||
apply (rule no_ofail_gets_the_eq[THEN iffD2])
|
||||
apply (fastforce intro: corres_u_nofail)
|
||||
done
|
||||
|
||||
|
||||
\<comment> \<open>Some @{term corres_underlying} rules for @{term whileLoop}\<close>
|
||||
|
||||
lemma in_whileLoop_corres:
|
||||
assumes body_corres:
|
||||
"\<And>r r'. rrel r r' \<Longrightarrow>
|
||||
|
@ -422,6 +514,10 @@ lemma ifM_corres:
|
|||
apply (wpsimp wp: abs_valid conc_valid hoare_vcg_if_lift2)+
|
||||
done
|
||||
|
||||
lemmas ifM_corres' =
|
||||
ifM_corres[where A=A and B=A and C=A for A, simplified,
|
||||
where A'=A' and B'=A' and C'=A' for A', simplified]
|
||||
|
||||
lemma orM_corres:
|
||||
"\<lbrakk>corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) R R' b b';
|
||||
\<lbrace>B\<rbrace> a \<lbrace>\<lambda>c s. \<not> c \<longrightarrow> R s\<rbrace>; \<lbrace>B'\<rbrace> a' \<lbrace>\<lambda>c s. \<not> c \<longrightarrow> R' s\<rbrace>\<rbrakk>
|
||||
|
@ -432,6 +528,9 @@ lemma orM_corres:
|
|||
apply (wpsimp | fastforce)+
|
||||
done
|
||||
|
||||
lemmas orM_corres' =
|
||||
orM_corres[where A=A and B=A for A, simplified, where A'=A' and B'=A' for A', simplified]
|
||||
|
||||
lemma andM_corres:
|
||||
"\<lbrakk>corres_underlying srel nf nf' (=) A A' a a'; corres_underlying srel nf nf' (=) Q Q' b b';
|
||||
\<lbrace>B\<rbrace> a \<lbrace>\<lambda>c s. c \<longrightarrow> Q s\<rbrace>; \<lbrace>B'\<rbrace> a' \<lbrace>\<lambda>c s. c \<longrightarrow> Q' s\<rbrace>\<rbrakk>
|
||||
|
@ -451,4 +550,8 @@ lemma notM_corres:
|
|||
apply wpsimp+
|
||||
done
|
||||
|
||||
lemma ifM_to_top_of_bind:
|
||||
"((ifM test true false) >>= z) = ifM test (true >>= z) (false >>= z)"
|
||||
by (force simp: ifM_def bind_def split: if_splits)
|
||||
|
||||
end
|
||||
|
|
|
@ -0,0 +1,479 @@
|
|||
(*
|
||||
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
*
|
||||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
(* Singly-linked lists on heaps or projections from heaps, as predicate and as recursive function.
|
||||
Loosely after ~~/src/HOL/Hoare/Pointer_Examples.thy *)
|
||||
|
||||
theory Heap_List
|
||||
imports Main "HOL-Library.Prefix_Order" ListLibLemmas
|
||||
begin
|
||||
|
||||
(* Given a heap projection that returns the next-pointer for an object at address x,
|
||||
given a start pointer x, and an end pointer y, determine if the given list
|
||||
is the path of addresses visited when following the next-pointers from x to y *)
|
||||
primrec heap_path :: "('a \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> 'a option \<Rightarrow> bool" where
|
||||
"heap_path hp x [] y = (x = y)"
|
||||
| "heap_path hp x (a#as) y = (x = Some a \<and> heap_path hp (hp a) as y)"
|
||||
|
||||
(* When a path ends in None, it is a singly-linked list *)
|
||||
abbreviation heap_ls :: "('a \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> bool" where
|
||||
"heap_ls hp x xs \<equiv> heap_path hp x xs None"
|
||||
|
||||
(* Walk a linked list of next pointers, recording which it visited.
|
||||
Terminates artificially at loops, and otherwise because the address domain is finite *)
|
||||
function heap_walk :: "('a::finite \<rightharpoonup> 'a) \<Rightarrow> 'a option \<Rightarrow> 'a list \<Rightarrow> 'a list" where
|
||||
"heap_walk hp None xs = xs"
|
||||
| "heap_walk hp (Some x) xs = (if x \<in> set xs then xs else heap_walk hp (hp x) (xs@[x]))"
|
||||
by pat_completeness auto
|
||||
|
||||
lemma card_set_UNIV:
|
||||
fixes xs :: "'a::finite list"
|
||||
assumes "x \<notin> set xs"
|
||||
shows "card (set xs) < card(UNIV::'a set)"
|
||||
proof -
|
||||
have "finite (UNIV::'a set)" by simp
|
||||
moreover
|
||||
from assms have "set xs \<subset> UNIV" by blast
|
||||
ultimately
|
||||
show ?thesis by (rule psubset_card_mono)
|
||||
qed
|
||||
|
||||
termination heap_walk
|
||||
by (relation "measure (\<lambda>(_, _, xs). card(UNIV :: 'a set) - card (set xs))";
|
||||
simp add: card_set_UNIV diff_less_mono2)
|
||||
|
||||
lemma heap_path_append[simp]:
|
||||
"heap_path hp start (xs @ ys) end = (\<exists>x. heap_path hp start xs x \<and> heap_path hp x ys end)"
|
||||
by (induct xs arbitrary: start; simp)
|
||||
|
||||
lemma heap_path_None[simp]:
|
||||
"heap_path hp None xs end = (xs = [] \<and> end = None)"
|
||||
by (cases xs, auto)
|
||||
|
||||
lemma heap_ls_unique:
|
||||
"\<lbrakk> heap_ls hp x xs; heap_ls hp x ys \<rbrakk> \<Longrightarrow> xs = ys"
|
||||
by (induct xs arbitrary: ys x; simp) (case_tac ys; clarsimp)
|
||||
|
||||
lemma heap_ls_hd_not_in_tl:
|
||||
"heap_ls hp (hp x) xs \<Longrightarrow> x \<notin> set xs"
|
||||
proof
|
||||
assume "x \<in> set xs"
|
||||
then obtain ys zs where xs: "xs = ys @ x # zs" by (auto simp: in_set_conv_decomp)
|
||||
moreover assume "heap_ls hp (hp x) xs"
|
||||
moreover from this xs have "heap_ls hp (hp x) zs" by clarsimp
|
||||
ultimately show False by (fastforce dest: heap_ls_unique)
|
||||
qed
|
||||
|
||||
lemma heap_ls_distinct:
|
||||
"heap_ls hp x xs \<Longrightarrow> distinct xs"
|
||||
by (induct xs arbitrary: x; clarsimp simp: heap_ls_hd_not_in_tl)
|
||||
|
||||
lemma heap_ls_is_walk':
|
||||
"\<lbrakk> heap_ls hp x xs; set xs \<inter> set ys = {} \<rbrakk> \<Longrightarrow> heap_walk hp x ys = ys @ xs"
|
||||
by (frule heap_ls_distinct) (induct xs arbitrary: x ys; clarsimp)
|
||||
|
||||
lemma heap_ls_is_walk:
|
||||
"heap_ls hp x xs \<Longrightarrow> heap_walk hp x [] = xs"
|
||||
using heap_ls_is_walk' by fastforce
|
||||
|
||||
lemma heap_path_end_unique:
|
||||
"heap_path hp x xs y \<Longrightarrow> heap_path hp x xs y' \<Longrightarrow> y = y'"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma heap_path_head:
|
||||
"heap_path hp x xs y \<Longrightarrow> xs \<noteq> [] \<Longrightarrow> x = Some (hd xs)"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma heap_path_non_nil_lookup_next:
|
||||
"heap_path hp x (xs@z#ys) y \<Longrightarrow> hp z = (case ys of [] \<Rightarrow> y | _ \<Rightarrow> Some (hd ys))"
|
||||
by (cases ys; fastforce)
|
||||
|
||||
lemma heap_path_prefix:
|
||||
"heap_path hp st ls ed \<Longrightarrow> \<forall>xs\<le>ls. heap_path hp st xs (if xs = [] then st else hp (last xs))"
|
||||
apply clarsimp
|
||||
apply (erule Prefix_Order.prefixE)
|
||||
by (metis append_butlast_last_id heap_path_append heap_path_non_nil_lookup_next list.case(1))
|
||||
|
||||
lemma heap_path_butlast:
|
||||
"heap_path hp st ls ed \<Longrightarrow> ls \<noteq> [] \<Longrightarrow> heap_path hp st (butlast ls) (Some (last ls))"
|
||||
by (induct ls rule: rev_induct; simp)
|
||||
|
||||
lemma in_list_decompose_takeWhile:
|
||||
"x \<in> set xs \<Longrightarrow>
|
||||
xs = (takeWhile ((\<noteq>) x) xs) @ x # (drop (length (takeWhile ((\<noteq>) x) xs) + 1) xs)"
|
||||
by (induct xs arbitrary: x; clarsimp)
|
||||
|
||||
lemma takeWhile_neq_hd_eq_Nil[simp]:
|
||||
"takeWhile ((\<noteq>) (hd xs)) xs = Nil"
|
||||
by (metis (full_types) hd_Cons_tl takeWhile.simps(1) takeWhile.simps(2))
|
||||
|
||||
lemma heap_not_in_dom[simp]:
|
||||
"ptr \<notin> dom hp \<Longrightarrow> hp(ptr := None) = hp"
|
||||
by (auto simp: dom_def)
|
||||
|
||||
lemma heap_path_takeWhile_lookup_next:
|
||||
"\<lbrakk> heap_path hp st rs ed; r \<in> set rs \<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) r) rs) (Some r)"
|
||||
apply (drule heap_path_prefix)
|
||||
apply (subgoal_tac "takeWhile ((\<noteq>) r) rs @ [r] \<le> rs", fastforce)
|
||||
by (fastforce dest!: in_list_decompose_takeWhile intro: Prefix_Order.prefixI)
|
||||
|
||||
lemma heap_path_heap_upd_not_in:
|
||||
"\<lbrakk>heap_path hp st rs ed; r \<notin> set rs\<rbrakk> \<Longrightarrow> heap_path (hp(r:= x)) st rs ed"
|
||||
by (induct rs arbitrary: st; clarsimp)
|
||||
|
||||
lemma heap_path_last_update:
|
||||
"\<lbrakk>heap_path hp st xs end; xs \<noteq> []; distinct xs\<rbrakk> \<Longrightarrow> heap_path (hp(last xs := new)) st xs new"
|
||||
by (induct xs arbitrary: st rule: rev_induct; simp add: heap_path_heap_upd_not_in)
|
||||
|
||||
lemma heap_walk_lb:
|
||||
"heap_walk hp x xs \<ge> xs"
|
||||
apply (induct xs rule: heap_walk.induct; clarsimp)
|
||||
by (metis Prefix_Order.prefixE Prefix_Order.prefixI append_assoc)
|
||||
|
||||
lemma heal_walk_Some_nonempty':
|
||||
"heap_walk hp (Some x) [] > []"
|
||||
by (fastforce intro: heap_walk_lb less_le_trans[where y="[x]"])
|
||||
|
||||
lemma heal_walk_Some_nonempty:
|
||||
"heap_walk hp (Some x) [] \<noteq> []"
|
||||
by (metis less_list_def heal_walk_Some_nonempty')
|
||||
|
||||
lemma heap_walk_Nil_None:
|
||||
"heap_walk hp st [] = [] \<Longrightarrow> st = None"
|
||||
by (case_tac st; simp only: heal_walk_Some_nonempty)
|
||||
|
||||
lemma heap_path_last_end:
|
||||
"heap_path hp st xs end \<Longrightarrow> xs \<noteq> [] \<Longrightarrow> hp (last xs) = end"
|
||||
by (induct xs rule: rev_induct; clarsimp)
|
||||
|
||||
lemmas heap_ls_last_None = heap_path_last_end[where ?end=None]
|
||||
|
||||
(* sym_heap *)
|
||||
|
||||
definition sym_heap where
|
||||
"sym_heap hp hp' \<equiv> \<forall>p p'. hp p = Some p' \<longleftrightarrow> hp' p' = Some p"
|
||||
|
||||
lemma sym_heapD1:
|
||||
"sym_heap hp hp' \<Longrightarrow> hp p = Some p' \<Longrightarrow> hp' p' = Some p"
|
||||
by (clarsimp simp: sym_heap_def)
|
||||
|
||||
lemma sym_heapD2:
|
||||
"sym_heap hp hp' \<Longrightarrow> hp' p' = Some p \<Longrightarrow> hp p = Some p'"
|
||||
by (clarsimp simp: sym_heap_def)
|
||||
|
||||
lemma sym_heap_symmetric:
|
||||
"sym_heap hp hp' \<longleftrightarrow> sym_heap hp' hp"
|
||||
unfolding sym_heap_def by blast
|
||||
|
||||
lemma sym_heap_None:
|
||||
"\<lbrakk>sym_heap hp hp'; hp p = None\<rbrakk> \<Longrightarrow> \<forall>p'. hp' p' \<noteq> Some p" unfolding sym_heap_def by force
|
||||
|
||||
lemma sym_heap_remove_only:
|
||||
"\<lbrakk>sym_heap hp hp'; hp' y = Some x\<rbrakk> \<Longrightarrow> sym_heap (hp(x := None)) (hp'(y := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.inject)
|
||||
done
|
||||
|
||||
lemma sym_heap_remove_only':
|
||||
"\<lbrakk>sym_heap hp hp'; hp y = Some x\<rbrakk> \<Longrightarrow> sym_heap (hp(y := None)) (hp'(x := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.inject)
|
||||
done
|
||||
|
||||
lemma sym_heap_remove_middle_from_chain:
|
||||
"\<lbrakk>sym_heap hp hp'; before \<noteq> middle; middle \<noteq> after;
|
||||
hp before = Some middle; hp middle = Some after\<rbrakk>
|
||||
\<Longrightarrow> sym_heap (hp(before := Some after, middle := None))
|
||||
(hp'(after := Some before, middle := None))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.simps(1))
|
||||
done
|
||||
|
||||
lemma sym_heap_connect:
|
||||
"\<lbrakk>sym_heap hp hp'; hp a = None; hp' b = None \<rbrakk> \<Longrightarrow> sym_heap (hp(a \<mapsto> b)) (hp'(b \<mapsto> a))"
|
||||
by (force simp: sym_heap_def)
|
||||
|
||||
lemma sym_heap_insert_into_middle_of_chain:
|
||||
"\<lbrakk>sym_heap hp hp'; hp before = Some after; hp middle = None; hp' middle = None\<rbrakk>
|
||||
\<Longrightarrow> sym_heap (hp(before \<mapsto> middle, middle \<mapsto> after)) (hp'(after \<mapsto> middle, middle \<mapsto> before))"
|
||||
apply (clarsimp simp: sym_heap_def)
|
||||
apply (metis option.simps)
|
||||
done
|
||||
|
||||
lemma sym_heap_path_reverse:
|
||||
"sym_heap hp hp' \<Longrightarrow>
|
||||
heap_path hp (Some p) (p#ps) (Some p')
|
||||
\<longleftrightarrow> heap_path hp' (Some p') (p'#(rev ps)) (Some p)"
|
||||
unfolding sym_heap_def by (induct ps arbitrary: p p' rule: rev_induct; force)
|
||||
|
||||
lemma sym_heap_ls_rev_Cons:
|
||||
"\<lbrakk>sym_heap hp hp'; heap_ls hp (Some p) (p#ps)\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp' (Some (last (p#ps))) (rev ps) (Some p)"
|
||||
supply rev.simps[simp del]
|
||||
apply (induct ps arbitrary: p rule: rev_induct; simp add: rev.simps)
|
||||
by (auto dest!: sym_heap_path_reverse[THEN iffD1])
|
||||
|
||||
lemma sym_heap_ls_rev:
|
||||
"\<lbrakk>sym_heap hp hp'; heap_ls hp (Some p) ps\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp' (Some (last ps)) (butlast (rev ps)) (Some p)
|
||||
\<and> hp (last ps) = None"
|
||||
apply (induct ps arbitrary: p rule: rev_induct, simp)
|
||||
apply (frule heap_path_head; clarsimp)
|
||||
by (auto dest!: sym_heap_path_reverse[THEN iffD1])
|
||||
|
||||
lemma heap_path_sym_heap_non_nil_lookup_prev:
|
||||
"\<lbrakk>heap_ls hp x (xs @ z # ys); sym_heap hp hp'; xs \<noteq> []\<rbrakk> \<Longrightarrow> hp' z = (Some (last xs))"
|
||||
supply heap_path_append[simp del]
|
||||
apply (cut_tac xs="butlast xs" and z="last xs" and ys="z # ys"
|
||||
in heap_path_non_nil_lookup_next[where hp=hp and x=x and y=None])
|
||||
apply (frule append_butlast_last_id)
|
||||
apply (metis append_eq_Cons_conv append_eq_append_conv2)
|
||||
apply (fastforce dest: sym_heapD1)
|
||||
done
|
||||
|
||||
lemma heap_ls_no_loops:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs\<rbrakk> \<Longrightarrow> hp p \<noteq> Some p"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (fastforce dest: split_list heap_path_non_nil_lookup_next split: list.splits)
|
||||
done
|
||||
|
||||
lemma heap_ls_prev_no_loops:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; sym_heap hp hp'\<rbrakk> \<Longrightarrow> hp' p \<noteq> Some p"
|
||||
by (fastforce dest: heap_ls_no_loops sym_heapD2)
|
||||
|
||||
(* more on heap_path : next/prev in path *)
|
||||
|
||||
lemma heap_path_extend:
|
||||
"heap_path hp st (ls @ [p]) (hp p) \<longleftrightarrow> heap_path hp st ls (Some p)"
|
||||
by (induct ls rule: rev_induct; simp)
|
||||
|
||||
lemma heap_path_prefix_heap_ls:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st ys ed\<rbrakk> \<Longrightarrow> ys \<le> xs"
|
||||
apply (induct xs arbitrary: ys st, simp)
|
||||
apply (case_tac ys; clarsimp)
|
||||
done
|
||||
|
||||
lemma distinct_decompose2:
|
||||
"\<lbrakk>distinct xs; xs = ys @ x # y # zs\<rbrakk>
|
||||
\<Longrightarrow> x \<noteq> y \<and> x \<notin> set ys \<and> y \<notin> set ys \<and> x \<notin> set zs \<and> y \<notin> set zs"
|
||||
by (simp add: in_set_conv_decomp)
|
||||
|
||||
lemma heap_path_distinct_next_cases: (* the other direction needs sym_heap *)
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> ed = Some p \<or> ed = Some np \<or> np \<in> set xs"
|
||||
apply (cases ed; simp)
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_ls hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs)")
|
||||
apply (drule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs"; simp)
|
||||
apply (metis in_set_dropD list.set_intros(1))
|
||||
apply simp
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_path hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs) ed")
|
||||
apply (frule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs", simp)
|
||||
apply (simp split: if_split_asm)
|
||||
apply (drule (1) distinct_decompose2)
|
||||
apply clarsimp
|
||||
by (metis in_set_dropD list.set_intros(1)) simp
|
||||
|
||||
lemma heap_ls_next_in_list:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<in> set xs"
|
||||
apply (subgoal_tac "distinct xs")
|
||||
by (fastforce dest!: heap_path_distinct_next_cases) (erule heap_ls_distinct)
|
||||
|
||||
lemma heap_path_distinct_sym_prev_cases:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; np \<in> set xs; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> st = Some np \<or> p \<in> set xs"
|
||||
apply (cases st; simp)
|
||||
apply (rename_tac stp)
|
||||
apply (case_tac "stp = np"; simp)
|
||||
apply (cases xs; simp del: heap_path.simps)
|
||||
apply (frule heap_path_head, simp)
|
||||
apply (cases ed, clarsimp)
|
||||
apply (frule sym_heap_ls_rev_Cons, fastforce)
|
||||
apply (drule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def)
|
||||
apply simp
|
||||
apply (simp del: heap_path.simps)
|
||||
apply (frule (1) sym_heap_path_reverse[where hp'=hp', THEN iffD1])
|
||||
apply simp
|
||||
apply (frule heap_path_distinct_next_cases[where hp=hp']; simp add: sym_heap_def)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma heap_ls_prev_cases:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<in> set xs; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> st = Some np \<or> p \<in> set xs"
|
||||
apply (subgoal_tac "distinct xs")
|
||||
by (fastforce dest!: heap_path_distinct_sym_prev_cases) (erule heap_ls_distinct)
|
||||
|
||||
lemma heap_ls_prev_not_in:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<notin> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> p \<notin> set xs"
|
||||
by (meson heap_ls_next_in_list)
|
||||
|
||||
lemma heap_path_distinct_prev_not_in:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; np \<notin> set xs; hp p = Some np; ed \<noteq> Some np; ed \<noteq> Some p\<rbrakk>
|
||||
\<Longrightarrow> p \<notin> set xs"
|
||||
using heap_path_distinct_next_cases
|
||||
by fastforce
|
||||
|
||||
lemma heap_path_distinct_next_not_in:
|
||||
"\<lbrakk>heap_path hp st xs ed; distinct xs; p \<notin> set xs; hp p = Some np;
|
||||
sym_heap hp hp'; st \<noteq> Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<notin> set xs"
|
||||
by (fastforce dest!: heap_path_distinct_sym_prev_cases[simplified])
|
||||
|
||||
lemma heap_ls_next_not_in:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<notin> set xs; hp p = Some np; sym_heap hp hp'; st \<noteq> Some np\<rbrakk>
|
||||
\<Longrightarrow> np \<notin> set xs"
|
||||
by (fastforce dest!: heap_ls_prev_cases[simplified])
|
||||
|
||||
lemma sym_heap_prev_None_is_start:
|
||||
"\<lbrakk>heap_ls hp st xs; sym_heap hp hp'; p \<in> set xs; hp' p = None\<rbrakk>
|
||||
\<Longrightarrow> Some p = st"
|
||||
using split_list_last heap_path_sym_heap_non_nil_lookup_prev
|
||||
by fastforce
|
||||
|
||||
lemma not_last_next_not_None:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; p \<noteq> last xs\<rbrakk> \<Longrightarrow> hp p \<noteq> None"
|
||||
by (fastforce intro: heap_path_head dest: split_list)
|
||||
|
||||
lemma not_head_prev_not_None:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; p \<noteq> hd xs; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> hp' p \<noteq> None"
|
||||
using sym_heap_prev_None_is_start heap_path_head
|
||||
by fastforce
|
||||
|
||||
(* more on heap_path *)
|
||||
|
||||
lemma heap_ls_next_takeWhile_append:
|
||||
"\<lbrakk>heap_ls hp st xs; p \<in> set xs; hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> takeWhile ((\<noteq>) np) xs = (takeWhile ((\<noteq>) p) xs) @ [p]"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (frule in_list_decompose_takeWhile)
|
||||
apply (subgoal_tac "heap_ls hp st (takeWhile ((\<noteq>) p) xs @ p # drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs)")
|
||||
prefer 2 apply simp
|
||||
apply (drule heap_path_non_nil_lookup_next)
|
||||
apply (case_tac "drop (length (takeWhile ((\<noteq>) p) xs) + 1) xs"; simp)
|
||||
apply (subgoal_tac "np \<in> set xs")
|
||||
prefer 2 apply (erule (2) heap_ls_next_in_list)
|
||||
apply (frule in_list_decompose_takeWhile[where x=np])
|
||||
apply (drule (1) distinct_inj_middle[where x=np and xa="takeWhile ((\<noteq>) np) xs" and ya="takeWhile ((\<noteq>) p) xs @ [p]"])
|
||||
apply simp+
|
||||
done
|
||||
|
||||
(* RT FIXME: Move *)
|
||||
lemma takeWhile_neq_notin_same:
|
||||
"x \<notin> set xs \<Longrightarrow> takeWhile ((\<noteq>) x) xs = xs"
|
||||
using takeWhile_eq_all_conv by blast
|
||||
|
||||
lemma heap_path_extend_takeWhile:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st (takeWhile ((\<noteq>) p) xs) (Some p); hp p = Some np\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) np) xs) (Some np)"
|
||||
apply (case_tac "p \<in> set xs")
|
||||
apply (subst heap_ls_next_takeWhile_append[where p=p and np=np and hp=hp]; simp)
|
||||
apply (drule takeWhile_neq_notin_same, simp)
|
||||
apply (drule (1) heap_path_end_unique, simp)
|
||||
done
|
||||
|
||||
lemma heap_ls_next_takeWhile_append_sym:
|
||||
"\<lbrakk>heap_ls hp st xs; np \<in> set xs; st \<noteq> Some np; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow>takeWhile ((\<noteq>) np) xs = (takeWhile ((\<noteq>) p) xs) @ [p]"
|
||||
apply (frule (3) heap_ls_prev_cases, simp)
|
||||
apply (fastforce elim!: heap_ls_next_takeWhile_append)
|
||||
done
|
||||
|
||||
lemma heap_path_curtail_takeWhile:
|
||||
"\<lbrakk>heap_ls hp st xs; heap_path hp st (takeWhile ((\<noteq>) np) xs) (Some np);
|
||||
st \<noteq> Some np; hp p = Some np; sym_heap hp hp'\<rbrakk>
|
||||
\<Longrightarrow> heap_path hp st (takeWhile ((\<noteq>) p) xs) (Some p)"
|
||||
apply (case_tac "np \<in> set xs")
|
||||
apply (drule (4) heap_ls_next_takeWhile_append_sym)
|
||||
apply simp
|
||||
apply (drule takeWhile_neq_notin_same, simp)
|
||||
apply (drule (1) heap_path_end_unique, simp)
|
||||
done
|
||||
|
||||
(* more on heap_path : end *)
|
||||
|
||||
|
||||
\<comment> \<open>Lemmas relating an update to the list to an update to the heap\<close>
|
||||
|
||||
lemma heap_ls_prepend:
|
||||
"\<lbrakk>heap_ls hp st xs; new \<notin> set xs; xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(new := Some (hd xs))) (Some new) (new # xs)"
|
||||
apply simp
|
||||
apply (erule heap_path_heap_upd_not_in[rotated])
|
||||
apply (frule (1) heap_path_head)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma heap_ls_append:
|
||||
"\<lbrakk>heap_ls hp st xs; xs \<noteq> []; new \<notin> set xs\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some new, new := None)) st (xs @ [new])"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply simp
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
apply (fastforce simp: heap_path_last_update)
|
||||
apply assumption
|
||||
done
|
||||
|
||||
lemma heap_ls_list_insert_before:
|
||||
"\<lbrakk>heap_ls hp st (xs @ ys); new \<notin> set (xs @ ys); xs \<noteq> []; ys \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some new, new := Some (hd ys))) st
|
||||
(list_insert_before (xs @ ys) (hd ys) new)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (subst list_insert_before_distinct; fastforce?)
|
||||
apply simp
|
||||
apply (rule conjI)
|
||||
\<comment> \<open>the path until new\<close>
|
||||
apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update)
|
||||
\<comment> \<open>the path from hd ys\<close>
|
||||
apply (metis disjoint_iff_not_equal heap_path_head heap_path_heap_upd_not_in last_in_set)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_singleton:
|
||||
"heap_ls hp st [x] \<Longrightarrow> heap_ls (hp(x := None)) None []"
|
||||
by simp
|
||||
|
||||
lemma heap_ls_remove_head_not_singleton:
|
||||
"\<lbrakk>heap_ls hp st xs; tl xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(hd xs := None)) (Some (hd (tl xs))) (tl xs)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (cases xs; simp)
|
||||
apply clarsimp
|
||||
apply (frule heap_path_head)
|
||||
apply fastforce
|
||||
apply (fastforce elim!: heap_path_heap_upd_not_in)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_last_not_singleton:
|
||||
"\<lbrakk>heap_ls hp st xs; butlast xs \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp((last (butlast xs)) := None)) st (butlast xs)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply (frule distinct_butlast)
|
||||
apply (fastforce dest: heap_path_last_update heap_path_butlast)
|
||||
done
|
||||
|
||||
lemma heap_ls_remove_middle:
|
||||
"\<lbrakk>heap_ls hp st (xs @ a # ys); xs \<noteq> []; ys \<noteq> []\<rbrakk>
|
||||
\<Longrightarrow> heap_ls (hp(last xs := Some (hd ys), a := None)) st (xs @ ys)"
|
||||
apply (frule heap_ls_distinct)
|
||||
apply simp
|
||||
apply (rule_tac x="Some (hd ys)" in exI)
|
||||
apply (rule conjI)
|
||||
apply (fastforce intro: heap_path_heap_upd_not_in heap_path_last_update)
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
apply (rule heap_path_heap_upd_not_in)
|
||||
using heap_path_head apply fastforce
|
||||
apply force
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
end
|
|
@ -22,7 +22,7 @@ imports
|
|||
Monads.Monad_Lib
|
||||
Basics.CLib
|
||||
NICTATools
|
||||
"Word_Lib.WordSetup"
|
||||
"Word_Lib_l4v.WordSetup"
|
||||
begin
|
||||
|
||||
(* FIXME: eliminate *)
|
||||
|
@ -2558,6 +2558,10 @@ lemma ranD:
|
|||
"v \<in> ran f \<Longrightarrow> \<exists>x. f x = Some v"
|
||||
by (auto simp: ran_def)
|
||||
|
||||
lemma fun_upd_swap:
|
||||
"a \<noteq> c \<Longrightarrow> hp(c := d, a := b) = hp(a := b, c := d)"
|
||||
by fastforce
|
||||
|
||||
text \<open>Prevent clarsimp and others from creating Some from not None by folding this and unfolding
|
||||
again when safe.\<close>
|
||||
|
||||
|
|
|
@ -371,6 +371,32 @@ lemma list_insert_after_after:
|
|||
apply fastforce
|
||||
done
|
||||
|
||||
lemma list_insert_before_not_found:
|
||||
"a \<notin> set ls \<Longrightarrow> list_insert_before ls a new = ls"
|
||||
by (induct ls; fastforce)
|
||||
|
||||
lemma list_insert_before_nonempty:
|
||||
"ls \<noteq> [] \<Longrightarrow> list_insert_before ls a new \<noteq> []"
|
||||
by (induct ls; clarsimp)
|
||||
|
||||
lemma list_insert_before_head:
|
||||
"xs \<noteq> [] \<Longrightarrow> list_insert_before xs (hd xs) new = new # xs"
|
||||
by (induct xs; fastforce)
|
||||
|
||||
lemma last_of_list_insert_before:
|
||||
"xs \<noteq> [] \<Longrightarrow> last (list_insert_before xs a new) = last xs"
|
||||
by (induct xs; clarsimp simp: list_insert_before_nonempty)
|
||||
|
||||
lemma list_insert_before_distinct:
|
||||
"\<lbrakk>distinct (xs @ ys); ys \<noteq> []\<rbrakk> \<Longrightarrow> list_insert_before (xs @ ys) (hd ys) new = xs @ new # ys"
|
||||
by (induct xs; fastforce simp add: list_insert_before_head)
|
||||
|
||||
lemma set_list_insert_before:
|
||||
"\<lbrakk>new \<notin> set ls; before \<in> set ls\<rbrakk> \<Longrightarrow> set (list_insert_before ls before new) = set ls \<union> {new}"
|
||||
apply (induct ls; clarsimp)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma list_remove_removed:
|
||||
"set (list_remove list x) = (set list) - {x}"
|
||||
apply (induct list,simp+)
|
||||
|
|
|
@ -26,6 +26,12 @@ primrec list_insert_after :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Righta
|
|||
"list_insert_after (x # xs) a b = (if x = a then x # b # xs
|
||||
else x # list_insert_after xs a b)"
|
||||
|
||||
\<comment> \<open>Inserts the value new immediately before the first occurence of a (if any) in the list\<close>
|
||||
primrec list_insert_before :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a list" where
|
||||
"list_insert_before [] a new = []" |
|
||||
"list_insert_before (x # xs) a new = (if x = a then new # x # xs
|
||||
else x # list_insert_before xs a new)"
|
||||
|
||||
primrec list_remove :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
|
||||
"list_remove [] a = []" |
|
||||
"list_remove (x # xs) a = (if x = a then (list_remove xs a)
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
theory MonadicRewrite
|
||||
imports
|
||||
Monads.Nondet_VCG
|
||||
Corres_UL
|
||||
ExtraCorres
|
||||
Monads.Nondet_Empty_Fail
|
||||
Rules_Tac
|
||||
begin
|
||||
|
@ -81,6 +81,10 @@ lemma monadic_rewrite_pre_imp_eq:
|
|||
"\<lbrakk> \<And>s. P s \<Longrightarrow> f s = g s \<rbrakk> \<Longrightarrow> monadic_rewrite F E P f g"
|
||||
by (simp add: monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_guard_arg_cong:
|
||||
"(\<And>s. P s \<Longrightarrow> x = y) \<Longrightarrow> monadic_rewrite F E P (f x) (f y)"
|
||||
by (clarsimp simp: monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_exists:
|
||||
"(\<And>v. monadic_rewrite F E (Q v) m m')
|
||||
\<Longrightarrow> monadic_rewrite F E ((\<lambda>s. \<forall>v. P v s \<longrightarrow> Q v s) and (\<lambda>s. \<exists>v. P v s)) m m'"
|
||||
|
@ -693,7 +697,7 @@ lemma monadic_rewrite_refine_validE_R:
|
|||
by (simp add: validE_R_def validE_def monadic_rewrite_refine_valid)
|
||||
|
||||
lemma monadic_rewrite_is_valid:
|
||||
"\<lbrakk> monadic_rewrite False False P' f f'; \<lbrace>P\<rbrace> do x <- f; g x od \<lbrace>Q\<rbrace> \<rbrakk>
|
||||
"\<lbrakk> monadic_rewrite False E P' f f'; \<lbrace>P\<rbrace> do x <- f; g x od \<lbrace>Q\<rbrace> \<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>P and P'\<rbrace> do x <- f'; g x od \<lbrace>Q\<rbrace>"
|
||||
by (fastforce simp: monadic_rewrite_def valid_def bind_def)
|
||||
|
||||
|
@ -740,6 +744,13 @@ lemma monadic_rewrite_corres_r_generic:
|
|||
\<Longrightarrow> corres_underlying R nf nf' r P (P' and Q) a c"
|
||||
by (fastforce simp: corres_underlying_def monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_corres_r_generic_ex_abs:
|
||||
"\<lbrakk> monadic_rewrite F E (\<lambda>s'. Q s' \<and> ex_abs_underlying sr P s') c' c;
|
||||
corres_underlying sr nf nf'' r P P' a c';
|
||||
F \<longrightarrow> nf''; nf' \<longrightarrow> nf'' \<rbrakk>
|
||||
\<Longrightarrow> corres_underlying sr nf nf' r P (P' and Q) a c"
|
||||
by (fastforce simp: corres_underlying_def monadic_rewrite_def)
|
||||
|
||||
lemma monadic_rewrite_corres_r:
|
||||
"\<lbrakk> monadic_rewrite False True Q c c';
|
||||
corres_underlying R nf nf' r P P' a c' \<rbrakk>
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
(* Syntax for using multi-argument functions as predicates, e.g "P and Q" where P and Q are
|
||||
functions to bool, taking one or more parameters. *)
|
||||
|
||||
chapter \<open>Function Predicate Syntax\<close>
|
||||
|
||||
theory Fun_Pred_Syntax
|
||||
imports Main
|
||||
begin
|
||||
|
@ -137,7 +139,9 @@ lemma pred_bot_comp[simp]:
|
|||
by (simp add: comp_def)
|
||||
|
||||
|
||||
text \<open>We would get these for free if we could instantiate pred_top/pred_bot to top/bot directly:\<close>
|
||||
text \<open>
|
||||
We would get these for free if we could instantiate @{const pred_top}/@{const pred_bot} to
|
||||
@{const top}/@{const bot} directly:\<close>
|
||||
|
||||
lemmas pred_top_left_neutral[simp] =
|
||||
inf_top.left_neutral[where 'a="'a \<Rightarrow> bool", unfolded pred_top_def]
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
chapter Lib
|
||||
|
||||
session Monads (lib) = HOL +
|
||||
options [document = pdf]
|
||||
|
||||
sessions
|
||||
"HOL-Library"
|
||||
|
@ -21,14 +22,7 @@ session Monads (lib) = HOL +
|
|||
trace
|
||||
|
||||
theories
|
||||
WPBang
|
||||
WPFix
|
||||
Eisbach_WP
|
||||
WPI
|
||||
WPC
|
||||
WP_Pre
|
||||
WP
|
||||
Datatype_Schematic
|
||||
Fun_Pred_Syntax
|
||||
Nondet_Monad
|
||||
Nondet_Lemmas
|
||||
Nondet_VCG
|
||||
|
@ -49,6 +43,28 @@ session Monads (lib) = HOL +
|
|||
Trace_Lemmas
|
||||
Trace_VCG
|
||||
Trace_Det
|
||||
Trace_No_Throw
|
||||
Trace_Empty_Fail
|
||||
Trace_No_Trace
|
||||
Trace_Total
|
||||
Trace_Strengthen_Setup
|
||||
Trace_Monad_Equations
|
||||
Trace_RG
|
||||
Trace_In_Monad
|
||||
Trace_More_VCG
|
||||
Trace_No_Fail
|
||||
Trace_Sat
|
||||
Strengthen
|
||||
Nondet_Strengthen_Setup
|
||||
Strengthen_Demo
|
||||
WPBang
|
||||
WPFix
|
||||
Eisbach_WP
|
||||
WPI
|
||||
WPC
|
||||
WP_Pre
|
||||
WP
|
||||
Datatype_Schematic
|
||||
|
||||
document_files
|
||||
"root.tex"
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
section \<open>Manipulating Hoare Logic Assertions\<close>
|
||||
|
||||
theory Strengthen
|
||||
imports Main
|
||||
begin
|
||||
|
|
|
@ -73,11 +73,11 @@ thm subset_UNIV subset_UNIV[mk_strg]
|
|||
|
||||
text \<open>Rules which would introduce schematics are
|
||||
adjusted by @{attribute mk_strg} to introduce quantifiers
|
||||
instead. The argument I to mk_strg prevents this step.
|
||||
instead. The argument I to @{attribute mk_strg} prevents this step.
|
||||
\<close>
|
||||
thm subsetD subsetD[mk_strg I] subsetD[mk_strg]
|
||||
|
||||
text \<open>The first argument to mk_strg controls the way
|
||||
text \<open>The first argument to @{attribute mk_strg} controls the way
|
||||
the rule will be applied.
|
||||
I: use rule in introduction style, matching its conclusion.
|
||||
D: use rule in destruction (forward) style, matching its first premise.
|
||||
|
@ -101,10 +101,10 @@ lemma
|
|||
(* oops, overdid it *)
|
||||
oops
|
||||
|
||||
text \<open>Subsequent arguments to mk_strg capture premises for
|
||||
text \<open>Subsequent arguments to @{attribute mk_strg} capture premises for
|
||||
special treatment. The 'A' argument (synonym 'E') specifies that
|
||||
a premise should be solved by assumption. Our fancy proof above
|
||||
used a strengthen rule bexI[mk_strg I _ A], which tells strengthen
|
||||
used a strengthen rule @{text "bexI[mk_strg I _ A]"}, which tells strengthen
|
||||
to do approximately the same thing as
|
||||
\<open>apply (rule bexI) prefer 2 apply assumption\<close>
|
||||
|
||||
|
|
|
@ -0,0 +1,81 @@
|
|||
%
|
||||
% Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
|
||||
%
|
||||
% SPDX-License-Identifier: CC-BY-SA-4.0
|
||||
%
|
||||
|
||||
\documentclass[11pt,a4paper]{report}
|
||||
\usepackage[T1]{fontenc}
|
||||
\usepackage{isabelle,isabellesym}
|
||||
|
||||
% this should be the last package used
|
||||
\usepackage{pdfsetup}
|
||||
|
||||
% urls in roman style, theory text in math-similar italics
|
||||
\urlstyle{rm}
|
||||
\isabellestyle{tt}
|
||||
|
||||
|
||||
\begin{document}
|
||||
|
||||
\title{State Monad Library}
|
||||
\author{David Greenaway,
|
||||
Gerwin Klein,
|
||||
Corey Lewis,
|
||||
Daniel Matichuk,
|
||||
Thomas Sewell}
|
||||
\maketitle
|
||||
|
||||
\begin{abstract}
|
||||
This entry contains a library of different state monads with a large set of
|
||||
operators, laws, Hoare Logic, weakest precondition rules, and corresponding
|
||||
automation. The formalisation is designed for reasoning about total and
|
||||
partial correctness, as well as for reasoning about failure separately from
|
||||
normal behaviour. Partial correctness in this context ignores program failure.
|
||||
Total correctness implies the absence of program failure.
|
||||
|
||||
The main monads formalised in this entry are a non-deterministic state monad
|
||||
with failure, and a state-based trace monad for modelling concurrent executions.
|
||||
Both support roughly the same set of operators. They come with a standard
|
||||
Hoare Logic and Rely-Guarantee logic respectively. The entry also contains an
|
||||
option reader monad that combines well with the non-deterministic state monad.
|
||||
The option reader monad provides additional operators useful for building
|
||||
state projections that can be used both in monadic functions and Hoare-Logic
|
||||
assertions, enhancing specification re-use in proofs.
|
||||
|
||||
This monad library is used in the verification of the seL4 microkernel and
|
||||
predates some of the monad developments in the Isabelle library. In
|
||||
particular, it defines its own syntax for do-notation, which can be overridden
|
||||
with the generic monad syntax in the Isabelle library. We have opted not to do
|
||||
so by default, because the overloading-based syntax from the Isabelle library
|
||||
often necessitates additional type annotations when mixing different monad
|
||||
types within one specification. For similar reasons, no attempt is made to
|
||||
state generic state monad laws in a type class or locale and then instantiate
|
||||
them for the two main monad instances. The resulting duplication from two
|
||||
instances is still easy to handle, but if more instances are added to this
|
||||
library, additional work on genericity would be useful.
|
||||
|
||||
This library has grown over more than a decade with many substantial
|
||||
contributions. We would specifically like to acknowledge the contributions by
|
||||
Nelson Billing, Andrew Boyton, Matthew Brecknell, David Cock, Matthias Daum,
|
||||
Alejandro Gomez-Londono, Rafal Kolanski, Japheth Lim, Michael McInerney, Toby
|
||||
Murray, Lars Noschinski, Edward Pierzchalski, Sean Seefried, Miki Tanaka, Vernon
|
||||
Tang, and Simon Windwood.
|
||||
\end{abstract}
|
||||
|
||||
\tableofcontents
|
||||
|
||||
\parindent 0pt\parskip 0.5ex
|
||||
|
||||
% generated text of all theories
|
||||
\input{session}
|
||||
|
||||
\bibliographystyle{abbrv}
|
||||
\bibliography{root}
|
||||
|
||||
\end{document}
|
||||
|
||||
%%% Local Variables:
|
||||
%%% mode: latex
|
||||
%%% TeX-master: t
|
||||
%%% End:
|
|
@ -14,7 +14,7 @@ begin
|
|||
section \<open>Monads that are wellformed w.r.t. failure\<close>
|
||||
|
||||
text \<open>
|
||||
Usually, well-formed monads constructed from the primitives in Nondet_Monad will have the following
|
||||
Usually, well-formed monads constructed from the primitives in @{text Nondet_Monad} will have the following
|
||||
property: if they return an empty set of results, they will have the failure flag set.\<close>
|
||||
definition empty_fail :: "('s,'a) nondet_monad \<Rightarrow> bool" where
|
||||
"empty_fail m \<equiv> \<forall>s. fst (m s) = {} \<longrightarrow> snd (m s)"
|
||||
|
|
|
@ -130,7 +130,7 @@ lemma no_fail_returnOK[simp, wp]:
|
|||
by (simp add: returnOk_def)
|
||||
|
||||
lemma no_fail_bind[wp]:
|
||||
"\<lbrakk> no_fail P f; \<And>rv. no_fail (R rv) (g rv); \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace> \<rbrakk> \<Longrightarrow> no_fail (P and Q) (f >>= (\<lambda>rv. g rv))"
|
||||
"\<lbrakk> \<And>rv. no_fail (R rv) (g rv); \<lbrace>Q\<rbrace> f \<lbrace>R\<rbrace>; no_fail P f \<rbrakk> \<Longrightarrow> no_fail (P and Q) (f >>= (\<lambda>rv. g rv))"
|
||||
unfolding no_fail_def bind_def
|
||||
using post_by_hoare by fastforce
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ lemma no_throw_def':
|
|||
by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits)
|
||||
|
||||
|
||||
subsection \<open>no_throw rules\<close>
|
||||
subsection \<open>@{text no_throw} rules\<close>
|
||||
|
||||
lemma no_throw_returnOk[simp]:
|
||||
"no_throw P (returnOk a)"
|
||||
|
|
|
@ -11,7 +11,7 @@ theory Nondet_Total
|
|||
imports Nondet_No_Fail
|
||||
begin
|
||||
|
||||
section \<open>Total correctness for Nondet_Monad and Nondet_Monad with exceptions\<close>
|
||||
section \<open>Total correctness for @{text Nondet_Monad} and @{text Nondet_Monad} with exceptions\<close>
|
||||
|
||||
subsection Definitions
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ text \<open>
|
|||
the empty set, the triple is trivially valid. This means @{term "assert P"}
|
||||
does not require us to prove that @{term P} holds, but rather allows us
|
||||
to assume @{term P}! Proving non-failure is done via a separate predicate and
|
||||
calculus (see Nondet_No_Fail).\<close>
|
||||
calculus (see theory @{text Nondet_No_Fail}).\<close>
|
||||
definition valid ::
|
||||
"('s \<Rightarrow> bool) \<Rightarrow> ('s,'a) nondet_monad \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> bool"
|
||||
("\<lbrace>_\<rbrace>/ _ /\<lbrace>_\<rbrace>") where
|
||||
|
|
|
@ -198,7 +198,7 @@ lemma whileLoop_results_induct_lemma3 [consumes 1]:
|
|||
apply clarsimp
|
||||
done
|
||||
|
||||
subsection "Inductive reasoning about whileLoop results"
|
||||
subsection "Inductive reasoning about @{const whileLoop} results"
|
||||
|
||||
lemma in_whileLoop_induct[consumes 1]:
|
||||
assumes in_whileLoop: "(r', s') \<in> fst (whileLoop C B r s)"
|
||||
|
@ -278,7 +278,7 @@ lemma whileLoop_terminatesE_induct [consumes 1]:
|
|||
apply (force simp: lift_def split: sum.splits)
|
||||
done
|
||||
|
||||
subsection "Direct reasoning about whileLoop components"
|
||||
subsection "Direct reasoning about @{const whileLoop} components"
|
||||
|
||||
lemma fst_whileLoop_cond_false:
|
||||
assumes loop_result: "(r', s') \<in> fst (whileLoop C B r s)"
|
||||
|
@ -447,33 +447,36 @@ lemma exs_valid_whileLoop:
|
|||
and wf_R: "wf R"
|
||||
and final_I: "\<And>r s. \<lbrakk> T r s; \<not> C r s \<rbrakk> \<Longrightarrow> Q r s"
|
||||
shows "\<lbrace> P \<rbrace> whileLoop C B r \<exists>\<lbrace> Q \<rbrace>"
|
||||
proof (clarsimp simp: exs_valid_def Bex_def)
|
||||
fix s
|
||||
assume "P s"
|
||||
|
||||
proof -
|
||||
{
|
||||
fix x
|
||||
have "T (fst x) (snd x) \<Longrightarrow> \<exists>r' s'. (r', s') \<in> fst (whileLoop C B (fst x) (snd x)) \<and> T r' s'"
|
||||
using wf_R
|
||||
proof induct
|
||||
case (less x)
|
||||
then show ?case
|
||||
apply atomize
|
||||
apply (cases "C (fst x) (snd x)")
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (clarsimp simp: condition_def bind_def')
|
||||
apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"])
|
||||
apply (clarsimp simp: exs_valid_def)
|
||||
apply blast
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (cases x)
|
||||
apply (clarsimp simp: condition_def bind_def' return_def)
|
||||
done
|
||||
qed
|
||||
}
|
||||
fix s
|
||||
assume "P s"
|
||||
|
||||
thus "\<exists>r' s'. (r', s') \<in> fst (whileLoop C B r s) \<and> Q r' s'"
|
||||
by (metis \<open>P s\<close> fst_conv init_T snd_conv final_I fst_whileLoop_cond_false)
|
||||
{
|
||||
fix x
|
||||
have "T (fst x) (snd x) \<Longrightarrow> \<exists>r' s'. (r', s') \<in> fst (whileLoop C B (fst x) (snd x)) \<and> T r' s'"
|
||||
using wf_R
|
||||
proof induct
|
||||
case (less x)
|
||||
then show ?case
|
||||
apply atomize
|
||||
apply (cases "C (fst x) (snd x)")
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (clarsimp simp: condition_def bind_def')
|
||||
apply (cut_tac iter_I[where ?s0.0="snd x" and r="fst x"])
|
||||
apply (clarsimp simp: exs_valid_def)
|
||||
apply blast
|
||||
apply (subst whileLoop_unroll)
|
||||
apply (cases x)
|
||||
apply (clarsimp simp: condition_def bind_def' return_def)
|
||||
done
|
||||
qed
|
||||
}
|
||||
|
||||
then have "\<exists>r' s'. (r', s') \<in> fst (whileLoop C B r s) \<and> Q r' s'"
|
||||
by (metis \<open>P s\<close> fst_conv init_T snd_conv final_I fst_whileLoop_cond_false)
|
||||
}
|
||||
thus ?thesis by (clarsimp simp: exs_valid_def Bex_def)
|
||||
qed
|
||||
|
||||
lemma empty_fail_whileLoop[empty_fail_cond, intro!, wp]:
|
||||
|
@ -618,7 +621,7 @@ lemma whileLoopE_liftE:
|
|||
apply (rule set_eqI, rule iffI)
|
||||
apply clarsimp
|
||||
apply (clarsimp simp: in_liftE whileLoop_def)
|
||||
\<comment> \<open>The schematic existential is instantiated by 'subst isr_Inr_proj' ... 'rule refl' in two lines\<close>
|
||||
\<comment> \<open>The schematic existential is instantiated by @{text "subst isr_Inr_projr ... rule refl"} in two lines\<close>
|
||||
apply (rule exI)
|
||||
apply (rule conjI)
|
||||
apply (subst isr_Inr_projr)
|
||||
|
@ -788,7 +791,7 @@ lemma whileLoopE_wp_inv [wp]:
|
|||
apply (rule validE_whileLoopE [where I=I], auto)
|
||||
done
|
||||
|
||||
subsection "Stronger whileLoop rules"
|
||||
subsection "Stronger @{const whileLoop} rules"
|
||||
|
||||
lemma whileLoop_rule_strong:
|
||||
assumes init_U: "\<lbrace> \<lambda>s'. s' = s \<rbrace> whileLoop C B r \<lbrace> \<lambda>r s. (r, s) \<in> fst Q \<rbrace>"
|
||||
|
@ -848,7 +851,7 @@ lemma snd_whileLoop_subset:
|
|||
done
|
||||
|
||||
|
||||
subsection "Some rules for whileM"
|
||||
subsection "Some rules for @{const whileM}"
|
||||
|
||||
lemma whileM_wp_gen:
|
||||
assumes termin:"\<And>s. I False s \<Longrightarrow> Q s"
|
||||
|
|
|
@ -11,6 +11,8 @@
|
|||
* Option monad while loop formalisation.
|
||||
*)
|
||||
|
||||
chapter \<open>State Projections and Reader Option Monad\<close>
|
||||
|
||||
theory Reader_Option_Monad
|
||||
imports
|
||||
Monad_Lib
|
||||
|
@ -22,7 +24,7 @@ section \<open>Projections\<close>
|
|||
|
||||
type_synonym ('s,'a) lookup = "'s \<Rightarrow> 'a option"
|
||||
|
||||
text \<open>Similar to map_option but the second function returns option as well\<close>
|
||||
text \<open>Similar to @{const map_option} but the second function returns @{text option} as well\<close>
|
||||
definition opt_map :: "('s,'a) lookup \<Rightarrow> ('a \<Rightarrow> 'b option) \<Rightarrow> ('s,'b) lookup" (infixl "|>" 54) where
|
||||
"f |> g \<equiv> \<lambda>s. case f s of None \<Rightarrow> None | Some x \<Rightarrow> g x"
|
||||
|
||||
|
@ -241,7 +243,7 @@ abbreviation ogets :: "('s \<Rightarrow> 'a) \<Rightarrow> ('s, 'a) lookup" wher
|
|||
|
||||
text \<open>
|
||||
Integration with exception monad.
|
||||
Corresponding bindE would be analogous to lifting in Nondet_Monad.\<close>
|
||||
Corresponding bindE would be analogous to lifting in @{text Nondet_Monad}.\<close>
|
||||
|
||||
definition
|
||||
"oreturnOk x = K (Some (Inr x))"
|
||||
|
@ -250,10 +252,10 @@ definition
|
|||
"othrow e = K (Some (Inl e))"
|
||||
|
||||
definition
|
||||
"oguard G \<equiv> (\<lambda>s. if G s then Some () else None)"
|
||||
"oguard G \<equiv> \<lambda>s. if G s then Some () else None"
|
||||
|
||||
definition
|
||||
"ocondition c L R \<equiv> (\<lambda>s. if c s then L s else R s)"
|
||||
"ocondition c L R \<equiv> \<lambda>s. if c s then L s else R s"
|
||||
|
||||
definition
|
||||
"oskip \<equiv> oreturn ()"
|
||||
|
@ -266,26 +268,26 @@ subsection \<open>Monad laws\<close>
|
|||
|
||||
lemma oreturn_bind[simp]:
|
||||
"(oreturn x |>> f) = f x"
|
||||
by (auto simp add: oreturn_def obind_def K_def)
|
||||
by (auto simp add: oreturn_def obind_def)
|
||||
|
||||
lemma obind_return[simp]:
|
||||
"(m |>> oreturn) = m"
|
||||
by (auto simp add: oreturn_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: oreturn_def obind_def split: option.splits)
|
||||
|
||||
lemma obind_assoc:
|
||||
"(m |>> f) |>> g = m |>> (\<lambda>x. f x |>> g)"
|
||||
by (auto simp add: oreturn_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: oreturn_def obind_def split: option.splits)
|
||||
|
||||
|
||||
subsection \<open>Binding and fail\<close>
|
||||
|
||||
lemma obind_fail [simp]:
|
||||
"f |>> (\<lambda>_. ofail) = ofail"
|
||||
by (auto simp add: ofail_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: ofail_def obind_def split: option.splits)
|
||||
|
||||
lemma ofail_bind [simp]:
|
||||
"ofail |>> m = ofail"
|
||||
by (auto simp add: ofail_def obind_def K_def split: option.splits)
|
||||
by (auto simp add: ofail_def obind_def split: option.splits)
|
||||
|
||||
|
||||
subsection \<open>Function package setup\<close>
|
||||
|
@ -348,11 +350,11 @@ lemma ocondition_True:
|
|||
|
||||
lemma in_oreturn [simp]:
|
||||
"(oreturn x s = Some v) = (v = x)"
|
||||
by (auto simp: oreturn_def K_def)
|
||||
by (auto simp: oreturn_def)
|
||||
|
||||
lemma oreturn_None[simp]:
|
||||
"\<not> oreturn x s = None"
|
||||
by (simp add: oreturn_def K_def)
|
||||
by (simp add: oreturn_def)
|
||||
|
||||
lemma oreturnE:
|
||||
"\<lbrakk>oreturn x s = Some v; v = x \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -360,7 +362,7 @@ lemma oreturnE:
|
|||
|
||||
lemma in_ofail[simp]:
|
||||
"ofail s \<noteq> Some v"
|
||||
by (auto simp: ofail_def K_def)
|
||||
by (auto simp: ofail_def)
|
||||
|
||||
lemma ofailE:
|
||||
"ofail s = Some v \<Longrightarrow> P"
|
||||
|
@ -408,7 +410,7 @@ lemma obindE:
|
|||
|
||||
lemma in_othrow_eq[simp]:
|
||||
"(othrow e s = Some v) = (v = Inl e)"
|
||||
by (auto simp: othrow_def K_def)
|
||||
by (auto simp: othrow_def)
|
||||
|
||||
lemma othrowE:
|
||||
"\<lbrakk>othrow e s = Some v; v = Inl e \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -416,7 +418,7 @@ lemma othrowE:
|
|||
|
||||
lemma in_oreturnOk_eq[simp]:
|
||||
"(oreturnOk x s = Some v) = (v = Inr x)"
|
||||
by (auto simp: oreturnOk_def K_def)
|
||||
by (auto simp: oreturnOk_def)
|
||||
|
||||
lemma oreturnOkE:
|
||||
"\<lbrakk>oreturnOk x s = Some v; v = Inr x \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
|
||||
|
@ -434,11 +436,11 @@ lemma in_opt_map_Some_None_eq[simp]:
|
|||
|
||||
lemma oreturn_comp[simp]:
|
||||
"oreturn x \<circ> f = oreturn x"
|
||||
by (simp add: oreturn_def K_def o_def)
|
||||
by (simp add: oreturn_def o_def)
|
||||
|
||||
lemma ofail_comp[simp]:
|
||||
"ofail \<circ> f = ofail"
|
||||
by (auto simp: ofail_def K_def)
|
||||
by (auto simp: ofail_def)
|
||||
|
||||
lemma oassert_comp[simp]:
|
||||
"oassert P \<circ> f = oassert P"
|
||||
|
@ -446,7 +448,7 @@ lemma oassert_comp[simp]:
|
|||
|
||||
lemma fail_apply[simp]:
|
||||
"ofail s = None"
|
||||
by (simp add: ofail_def K_def)
|
||||
by (simp add: ofail_def)
|
||||
|
||||
lemma oassert_apply[simp]:
|
||||
"oassert P s = (if P then Some () else None)"
|
||||
|
@ -474,7 +476,7 @@ lemma if_comp_dist:
|
|||
|
||||
lemma obindK_is_opt_map:
|
||||
"f \<bind> (\<lambda>x. K $ g x) = f |> g"
|
||||
by (simp add: obind_def opt_map_def K_def)
|
||||
by (simp add: obind_def opt_map_def)
|
||||
|
||||
|
||||
section \<open>"While" loops over option monad.\<close>
|
||||
|
|
|
@ -59,6 +59,10 @@ lemmas owhile_add_inv = owhile_inv_def[symmetric]
|
|||
|
||||
|
||||
(* WP rules for ovalid. *)
|
||||
lemma ovalid_post_taut[wp]:
|
||||
"\<lblot>P\<rblot> f \<lblot>\<top>\<top>\<rblot>"
|
||||
by (simp add: ovalid_def)
|
||||
|
||||
lemma ovalid_inv[wp]:
|
||||
"ovalid P f (\<lambda>_. P)"
|
||||
by (simp add: ovalid_def)
|
||||
|
@ -116,8 +120,8 @@ lemma owhile_ovalid[wp]:
|
|||
\<Longrightarrow> ovalid (I a) (owhile_inv C B a I M) Q"
|
||||
unfolding owhile_inv_def owhile_def ovalid_def
|
||||
apply clarify
|
||||
apply (frule_tac I = "\<lambda>a. I a s" in option_while_rule)
|
||||
apply auto
|
||||
apply (frule (1) option_while_rule[where I = "\<lambda>a. I a s" for s])
|
||||
apply auto
|
||||
done
|
||||
|
||||
definition ovalid_property where "ovalid_property P x = (\<lambda>s f. (\<forall>r. Some r = x s f \<longrightarrow> P r s))"
|
||||
|
@ -187,7 +191,7 @@ lemma owhile_NF[wp]:
|
|||
\<Longrightarrow> ovalidNF (I a) (owhile_inv C B a I M) Q"
|
||||
unfolding owhile_inv_def ovalidNF_def ovalid_def
|
||||
apply clarify
|
||||
apply (rule_tac I = I and M = "measure (\<lambda>r. M r s)" in owhile_rule)
|
||||
apply (rule owhile_rule[where I = I and M = "measure (\<lambda>r. M r s)" and s = s for s])
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
apply fastforce
|
||||
|
@ -247,7 +251,7 @@ lemma no_ofail_ogets[wp]:
|
|||
by simp
|
||||
|
||||
lemma no_ofail_obind[wp]:
|
||||
"\<lbrakk> \<And>r. no_ofail (P r) (g r); no_ofail Q f; ovalid Q f P \<rbrakk> \<Longrightarrow> no_ofail Q (obind f g)"
|
||||
"\<lbrakk> \<And>r. no_ofail (R r) (g r); \<lblot>Q\<rblot> f \<lblot>R\<rblot>; no_ofail P f \<rbrakk> \<Longrightarrow> no_ofail (P and Q) (f |>> g)"
|
||||
by (auto simp: no_ofail_def obind_def ovalid_def)
|
||||
|
||||
lemma no_ofail_K_bind[wp]:
|
||||
|
@ -275,6 +279,11 @@ lemma no_ofail_oassert_opt[simp, wp]:
|
|||
"no_ofail (\<lambda>_. P \<noteq> None) (oassert_opt P)"
|
||||
by (simp add: no_ofail_def oassert_opt_def split: option.splits)
|
||||
|
||||
lemma no_ofail_if[wp]:
|
||||
"\<lbrakk> P \<Longrightarrow> no_ofail Q f; \<not> P \<Longrightarrow> no_ofail R g \<rbrakk>
|
||||
\<Longrightarrow> no_ofail (if P then Q else R) (if P then f else g)"
|
||||
by simp
|
||||
|
||||
lemma no_ofail_owhen[wp]:
|
||||
"(P \<Longrightarrow> no_ofail Q f) \<Longrightarrow> no_ofail (if P then Q else \<top>) (owhen P f)"
|
||||
by (simp add: no_ofail_def owhen_def)
|
||||
|
@ -287,11 +296,14 @@ lemma no_ofail_oassert[simp, wp]:
|
|||
"no_ofail (\<lambda>_. P) (oassert P)"
|
||||
by (simp add: oassert_def no_ofail_def)
|
||||
|
||||
lemma no_ofail_gets_the:
|
||||
"no_ofail P f \<Longrightarrow> no_fail P (gets_the (f :: ('s, 'a) lookup))"
|
||||
by (fastforce simp: no_ofail_def no_fail_def gets_the_def gets_def
|
||||
get_def assert_opt_def bind_def return_def fail_def
|
||||
split: option.split)
|
||||
lemma no_ofail_gets_the_eq:
|
||||
"no_ofail P f \<longleftrightarrow> no_fail P (gets_the (f :: ('s, 'a) lookup))"
|
||||
by (auto simp: no_ofail_def no_fail_def gets_the_def gets_def
|
||||
get_def assert_opt_def bind_def return_def fail_def
|
||||
split: option.split)
|
||||
|
||||
lemmas no_ofail_gets_the =
|
||||
no_ofail_gets_the_eq[THEN iffD1]
|
||||
|
||||
lemma no_ofail_is_triple[wp_trip]:
|
||||
"no_ofail P f = triple_judgement P f (\<lambda>s f. f s \<noteq> None)"
|
||||
|
@ -348,7 +360,7 @@ lemma ovalidNF_pre_imp:
|
|||
by (simp add: ovalidNF_def)
|
||||
|
||||
lemma no_ofail_pre_imp:
|
||||
"\<lbrakk> \<And>s. P' s \<Longrightarrow> P s; no_ofail P f \<rbrakk> \<Longrightarrow> no_ofail P' f"
|
||||
"\<lbrakk> no_ofail P f; \<And>s. P' s \<Longrightarrow> P s \<rbrakk> \<Longrightarrow> no_ofail P' f"
|
||||
by (simp add: no_ofail_def)
|
||||
|
||||
lemma ovalid_post_imp:
|
||||
|
|
|
@ -55,10 +55,8 @@ lemma det_UN:
|
|||
lemma bind_detI[simp, intro!]:
|
||||
"\<lbrakk> det f; \<forall>x. det (g x) \<rbrakk> \<Longrightarrow> det (f >>= g)"
|
||||
unfolding bind_def det_def
|
||||
apply clarsimp
|
||||
apply (erule_tac x=s in allE)
|
||||
apply clarsimp
|
||||
done
|
||||
apply (erule all_reg[rotated])
|
||||
by clarsimp
|
||||
|
||||
lemma det_modify[iff]:
|
||||
"det (modify f)"
|
||||
|
|
|
@ -14,9 +14,9 @@ begin
|
|||
section \<open>Monads that are wellformed w.r.t. failure\<close>
|
||||
|
||||
text \<open>
|
||||
Usually, well-formed monads constructed from the primitives in Trace_Monad will have the following
|
||||
property: if they return an empty set of completed results, there exists a trace corresponding to
|
||||
a failed result.\<close>
|
||||
Usually, well-formed monads constructed from the primitives in @{text Trace_Monad} will have the
|
||||
following property: if they return an empty set of completed results, there exists a trace
|
||||
corresponding to a failed result.\<close>
|
||||
definition empty_fail :: "('s,'a) tmonad \<Rightarrow> bool" where
|
||||
"empty_fail m \<equiv> \<forall>s. mres (m s) = {} \<longrightarrow> Failed \<in> snd ` (m s)"
|
||||
|
||||
|
@ -58,8 +58,7 @@ lemma empty_failD3:
|
|||
lemma empty_fail_bindD1:
|
||||
"empty_fail (a >>= b) \<Longrightarrow> empty_fail a"
|
||||
unfolding empty_fail_def bind_def
|
||||
apply clarsimp
|
||||
apply (drule_tac x=s in spec)
|
||||
apply (erule all_reg[rotated])
|
||||
by (force simp: split_def mres_def vimage_def split: tmres.splits)
|
||||
|
||||
|
||||
|
@ -112,10 +111,7 @@ lemma empty_fail_select[empty_fail_cond]:
|
|||
lemma mres_bind_empty:
|
||||
"mres ((f >>= g) s) = {}
|
||||
\<Longrightarrow> mres (f s) = {} \<or> (\<forall>res\<in>mres (f s). mres (g (fst res) (snd res)) = {})"
|
||||
apply clarsimp
|
||||
apply (clarsimp simp: mres_def split_def vimage_def bind_def)
|
||||
apply (rename_tac rv s' trace)
|
||||
apply (drule_tac x=rv in spec, drule_tac x=s' in spec)
|
||||
apply (clarsimp simp: bind_def mres_def split_def)
|
||||
apply (clarsimp simp: image_def)
|
||||
apply fastforce
|
||||
done
|
||||
|
|
|
@ -204,8 +204,8 @@ lemma liftE_liftM:
|
|||
lemma liftME_liftM:
|
||||
"liftME f = liftM (case_sum Inl (Inr \<circ> f))"
|
||||
unfolding liftME_def liftM_def bindE_def returnOk_def lift_def
|
||||
apply (rule ext, rename_tac x)
|
||||
apply (rule_tac f="bind x" in arg_cong)
|
||||
apply (rule ext)
|
||||
apply (rule arg_cong[where f="bind m" for m])
|
||||
apply (fastforce simp: throwError_def split: sum.splits)
|
||||
done
|
||||
|
||||
|
|
|
@ -40,7 +40,7 @@ abbreviation map_tmres_rv :: "('a \<Rightarrow> 'b) \<Rightarrow> ('s, 'a) tmres
|
|||
|
||||
text \<open>
|
||||
tmonad returns a set of non-deterministic computations, including
|
||||
a trace as a list of "thread identifier" \<times> state, and an optional
|
||||
a trace as a list of @{text "thread identifier \<times> state"}, and an optional
|
||||
pair of result and state when the computation did not fail.\<close>
|
||||
type_synonym ('s, 'a) tmonad = "'s \<Rightarrow> ((tmid \<times> 's) list \<times> ('s, 'a) tmres) set"
|
||||
|
||||
|
@ -713,7 +713,7 @@ definition whileLoop ::
|
|||
notation (output)
|
||||
whileLoop ("(whileLoop (_)// (_))" [1000, 1000] 1000)
|
||||
|
||||
\<comment> \<open>FIXME: why does this differ to Nondet_Monad?\<close>
|
||||
\<comment> \<open>FIXME: why does this differ to @{text Nondet_Monad}?\<close>
|
||||
definition whileLoopT ::
|
||||
"('r \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> ('r \<Rightarrow> ('s, 'r) tmonad) \<Rightarrow> 'r \<Rightarrow> ('s, 'r) tmonad"
|
||||
where
|
||||
|
@ -795,7 +795,7 @@ definition parallel :: "('s,'a) tmonad \<Rightarrow> ('s,'a) tmonad \<Rightarrow
|
|||
\<and> (map (\<lambda>(f_step, (id, s)). (if f_step then Env else id, s)) (zip f_steps xs), rv) \<in> g s})"
|
||||
|
||||
abbreviation(input)
|
||||
"parallel_mrg \<equiv> ((\<lambda>((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)))"
|
||||
"parallel_mrg \<equiv> \<lambda>((idn, s), (idn', _)). (if idn = Env then idn' else idn, s)"
|
||||
|
||||
lemma parallel_def2:
|
||||
"parallel f g = (\<lambda>s. {(xs, rv). \<exists>ys zs. (ys, rv) \<in> f s \<and> (zs, rv) \<in> g s
|
||||
|
@ -806,6 +806,7 @@ lemma parallel_def2:
|
|||
apply (rule exI, rule conjI, assumption)+
|
||||
apply (simp add: list_all2_conv_all_nth list_eq_iff_nth_eq split_def prod_eq_iff)
|
||||
apply clarsimp
|
||||
apply (rename_tac ys zs)
|
||||
apply (rule_tac x="map (((\<noteq>) Env) o fst) ys" in exI)
|
||||
apply (simp add: zip_map1 o_def split_def)
|
||||
apply (strengthen subst[where P="\<lambda>xs. (xs, v) \<in> S" for v S, mk_strg I _ E])
|
||||
|
|
|
@ -117,35 +117,22 @@ lemma gets_the_returns:
|
|||
by (simp_all add: returnOk_def throwError_def
|
||||
gets_the_return)
|
||||
|
||||
lemma all_rv_choice_fn_eq_pred:
|
||||
"\<lbrakk> \<And>rv. P rv \<Longrightarrow> \<exists>fn. f rv = g fn \<rbrakk> \<Longrightarrow> \<exists>fn. \<forall>rv. P rv \<longrightarrow> f rv = g (fn rv)"
|
||||
apply (rule_tac x="\<lambda>rv. SOME h. f rv = g h" in exI)
|
||||
apply (clarsimp split: if_split)
|
||||
by (meson someI_ex)
|
||||
|
||||
lemma all_rv_choice_fn_eq:
|
||||
"\<lbrakk> \<And>rv. \<exists>fn. f rv = g fn \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. f = (\<lambda>rv. g (fn rv))"
|
||||
using all_rv_choice_fn_eq_pred[where f=f and g=g and P=\<top>]
|
||||
by (simp add: fun_eq_iff)
|
||||
|
||||
lemma gets_the_eq_bind:
|
||||
"\<lbrakk> \<exists>fn. f = gets_the (fn o fn'); \<And>rv. \<exists>fn. g rv = gets_the (fn o fn') \<rbrakk>
|
||||
"\<lbrakk> f = gets_the (fn_f o fn'); \<And>rv. g rv = gets_the (fn_g rv o fn') \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. (f >>= g) = gets_the (fn o fn')"
|
||||
apply (clarsimp dest!: all_rv_choice_fn_eq)
|
||||
apply (rule_tac x="\<lambda>s. case (fn s) of None \<Rightarrow> None | Some v \<Rightarrow> fna v s" in exI)
|
||||
apply clarsimp
|
||||
apply (rule exI[where x="\<lambda>s. case (fn_f s) of None \<Rightarrow> None | Some v \<Rightarrow> fn_g v s"])
|
||||
apply (simp add: gets_the_def bind_assoc exec_gets
|
||||
assert_opt_def fun_eq_iff
|
||||
split: option.split)
|
||||
done
|
||||
|
||||
lemma gets_the_eq_bindE:
|
||||
"\<lbrakk> \<exists>fn. f = gets_the (fn o fn'); \<And>rv. \<exists>fn. g rv = gets_the (fn o fn') \<rbrakk>
|
||||
"\<lbrakk> f = gets_the (fn_f o fn'); \<And>rv. g rv = gets_the (fn_g rv o fn') \<rbrakk>
|
||||
\<Longrightarrow> \<exists>fn. (f >>=E g) = gets_the (fn o fn')"
|
||||
apply (simp add: bindE_def)
|
||||
apply (erule gets_the_eq_bind)
|
||||
unfolding bindE_def
|
||||
apply (erule gets_the_eq_bind[where fn_g="\<lambda>rv s. case rv of Inl e \<Rightarrow> Some (Inl e) | Inr v \<Rightarrow> fn_g v s"])
|
||||
apply (simp add: lift_def gets_the_returns split: sum.split)
|
||||
apply fastforce
|
||||
done
|
||||
|
||||
lemma gets_the_fail:
|
||||
|
@ -376,7 +363,7 @@ lemma select_empty_bind[simp]:
|
|||
by (simp add: select_def bind_def)
|
||||
|
||||
|
||||
subsection \<open>Alternative env_steps with repeat\<close>
|
||||
subsection \<open>Alternative @{text env_steps} with repeat\<close>
|
||||
|
||||
lemma mapM_Cons:
|
||||
"mapM f (x # xs) = do
|
||||
|
@ -531,7 +518,7 @@ lemma put_trace_mapM_x:
|
|||
|
||||
lemma rev_surj:
|
||||
"surj rev"
|
||||
by (rule_tac f=rev in surjI, simp)
|
||||
by (rule surjI[where f=rev], simp)
|
||||
|
||||
lemma select_image:
|
||||
"select (f ` S) = do x \<leftarrow> select S; return (f x) od"
|
||||
|
|
|
@ -137,8 +137,8 @@ lemma hoare_split_bind_case_sum:
|
|||
"\<And>rv. \<lbrace>S rv\<rbrace> h rv \<lbrace>Q\<rbrace>"
|
||||
assumes y: "\<lbrace>P\<rbrace> f \<lbrace>S\<rbrace>,\<lbrace>R\<rbrace>"
|
||||
shows "\<lbrace>P\<rbrace> f >>= case_sum g h \<lbrace>Q\<rbrace>"
|
||||
apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]])
|
||||
apply (case_tac x, simp_all add: x)
|
||||
apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]])
|
||||
apply (wpsimp wp: x split: sum.splits)
|
||||
done
|
||||
|
||||
lemma hoare_split_bind_case_sumE:
|
||||
|
@ -147,8 +147,8 @@ lemma hoare_split_bind_case_sumE:
|
|||
assumes y: "\<lbrace>P\<rbrace> f \<lbrace>S\<rbrace>,\<lbrace>R\<rbrace>"
|
||||
shows "\<lbrace>P\<rbrace> f >>= case_sum g h \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
|
||||
apply (unfold validE_def)
|
||||
apply (rule hoare_seq_ext [OF _ y[unfolded validE_def]])
|
||||
apply (case_tac x, simp_all add: x [unfolded validE_def])
|
||||
apply (rule hoare_seq_ext[OF _ y[unfolded validE_def]])
|
||||
apply (wpsimp wp: x[unfolded validE_def] split: sum.splits)
|
||||
done
|
||||
|
||||
lemma assertE_sp:
|
||||
|
@ -256,12 +256,11 @@ lemma doesn't_grow_proof:
|
|||
assumes x: "\<And>x. \<lbrace>\<lambda>s. x \<notin> S s \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. x \<notin> S s\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. card (S s) < n \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. card (S s) < n\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b \<subseteq> S s")
|
||||
apply (drule card_mono [OF y], simp)
|
||||
apply (erule le_less_trans[rotated])
|
||||
apply (rule card_mono[OF y])
|
||||
apply clarsimp
|
||||
apply (rule ccontr)
|
||||
apply (subgoal_tac "x \<notin> S b", simp)
|
||||
apply (erule use_valid [OF _ x])
|
||||
apply (drule (2) use_valid[OF _ x, OF _ conjI])
|
||||
apply simp
|
||||
done
|
||||
|
||||
|
@ -297,13 +296,12 @@ lemma shrinks_proof:
|
|||
assumes w: "\<And>s. P s \<Longrightarrow> x \<in> S s"
|
||||
shows "\<lbrace>\<lambda>s. card (S s) \<le> n \<and> P s\<rbrace> f \<lbrace>\<lambda>rv s. card (S s) < n\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b \<subset> S s")
|
||||
apply (drule psubset_card_mono [OF y], simp)
|
||||
apply (erule less_le_trans[rotated])
|
||||
apply (rule psubset_card_mono[OF y])
|
||||
apply (rule psubsetI)
|
||||
apply clarsimp
|
||||
apply (rule ccontr)
|
||||
apply (subgoal_tac "x \<notin> S b", simp)
|
||||
apply (erule use_valid [OF _ x])
|
||||
apply (drule (2) use_valid[OF _ x, OF _ conjI])
|
||||
apply simp
|
||||
by (metis use_valid w z)
|
||||
|
||||
|
@ -393,13 +391,12 @@ lemma P_bool_lift:
|
|||
assumes f: "\<lbrace>\<lambda>s. \<not>Q s\<rbrace> f \<lbrace>\<lambda>r s. \<not>Q s\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. P (Q s)\<rbrace> f \<lbrace>\<lambda>r s. P (Q s)\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "Q b = Q s")
|
||||
apply simp
|
||||
apply (rule back_subst[where P=P], assumption)
|
||||
apply (rule iffI)
|
||||
apply (rule classical)
|
||||
apply (drule (1) use_valid [OF _ f])
|
||||
apply simp
|
||||
apply (erule (1) use_valid [OF _ t])
|
||||
apply (erule (1) use_valid [OF _ t])
|
||||
apply (rule classical)
|
||||
apply (drule (1) use_valid [OF _ f])
|
||||
apply simp
|
||||
done
|
||||
|
||||
lemmas fail_inv = hoare_fail_any[where Q="\<lambda>_. P" and P=P for P]
|
||||
|
@ -416,11 +413,10 @@ lemma hoare_Ball_helper:
|
|||
assumes y: "\<And>P. \<lbrace>\<lambda>s. P (S s)\<rbrace> f \<lbrace>\<lambda>rv s. P (S s)\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. \<forall>x \<in> S s. P x s\<rbrace> f \<lbrace>\<lambda>rv s. \<forall>x \<in> S s. Q x rv s\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "S b = S s")
|
||||
apply (erule post_by_hoare2 [OF x])
|
||||
apply (clarsimp simp: Ball_def)
|
||||
apply (erule_tac P1="\<lambda>x. x = S s" in post_by_hoare2 [OF y])
|
||||
apply (rule refl)
|
||||
apply (drule bspec, erule back_subst[where P="\<lambda>A. x\<in>A" for x])
|
||||
apply (erule post_by_hoare[OF y, rotated])
|
||||
apply (rule refl)
|
||||
apply (erule (1) post_by_hoare[OF x])
|
||||
done
|
||||
|
||||
lemma handy_prop_divs:
|
||||
|
@ -479,14 +475,14 @@ lemma hoare_in_monad_post:
|
|||
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>x. P\<rbrace>"
|
||||
shows "\<lbrace>\<top>\<rbrace> f \<lbrace>\<lambda>rv s. (rv, s) \<in> mres (f s)\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (subgoal_tac "s = b", simp)
|
||||
apply (simp add: state_unchanged [OF x])
|
||||
apply (rule back_subst[where P="\<lambda>s. x\<in>mres (f s)" for x], assumption)
|
||||
apply (simp add: state_unchanged[OF x])
|
||||
done
|
||||
|
||||
lemma list_case_throw_validE_R:
|
||||
"\<lbrakk> \<And>y ys. xs = y # ys \<Longrightarrow> \<lbrace>P\<rbrace> f y ys \<lbrace>Q\<rbrace>,- \<rbrakk> \<Longrightarrow>
|
||||
\<lbrace>P\<rbrace> case xs of [] \<Rightarrow> throwError e | x # xs \<Rightarrow> f x xs \<lbrace>Q\<rbrace>,-"
|
||||
apply (case_tac xs, simp_all)
|
||||
apply (cases xs, simp_all)
|
||||
apply wp
|
||||
done
|
||||
|
||||
|
@ -522,13 +518,13 @@ lemma wp_split_const_if:
|
|||
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. (G \<longrightarrow> P s) \<and> (\<not> G \<longrightarrow> P' s)\<rbrace> f \<lbrace>\<lambda>rv s. (G \<longrightarrow> Q rv s) \<and> (\<not> G \<longrightarrow> Q' rv s)\<rbrace>"
|
||||
by (case_tac G, simp_all add: x y)
|
||||
by (cases G, simp_all add: x y)
|
||||
|
||||
lemma wp_split_const_if_R:
|
||||
assumes x: "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
|
||||
assumes y: "\<lbrace>P'\<rbrace> f \<lbrace>Q'\<rbrace>,-"
|
||||
shows "\<lbrace>\<lambda>s. (G \<longrightarrow> P s) \<and> (\<not> G \<longrightarrow> P' s)\<rbrace> f \<lbrace>\<lambda>rv s. (G \<longrightarrow> Q rv s) \<and> (\<not> G \<longrightarrow> Q' rv s)\<rbrace>,-"
|
||||
by (case_tac G, simp_all add: x y)
|
||||
by (cases G, simp_all add: x y)
|
||||
|
||||
lemma hoare_disj_division:
|
||||
"\<lbrakk> P \<or> Q; P \<Longrightarrow> \<lbrace>R\<rbrace> f \<lbrace>S\<rbrace>; Q \<Longrightarrow> \<lbrace>T\<rbrace> f \<lbrace>S\<rbrace> \<rbrakk>
|
||||
|
@ -589,11 +585,12 @@ lemma univ_wp:
|
|||
lemma univ_get_wp:
|
||||
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv. P\<rbrace>"
|
||||
shows "\<lbrace>\<lambda>s. \<forall>(rv, s') \<in> mres (f s). s = s' \<longrightarrow> Q rv s'\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
apply (rule hoare_pre_imp [OF _ univ_wp])
|
||||
apply (rule hoare_pre_imp[OF _ univ_wp])
|
||||
apply clarsimp
|
||||
apply (drule bspec, assumption, simp)
|
||||
apply (subgoal_tac "s = b", simp)
|
||||
apply (simp add: state_unchanged [OF x])
|
||||
apply (drule mp)
|
||||
apply (simp add: state_unchanged[OF x])
|
||||
apply simp
|
||||
done
|
||||
|
||||
lemma other_hoare_in_monad_post:
|
||||
|
@ -630,10 +627,10 @@ lemma bindE_split_recursive_asm:
|
|||
apply (clarsimp simp: validE_def valid_def bindE_def in_bind lift_def)
|
||||
apply (erule allE, erule(1) impE)
|
||||
apply (drule(1) bspec, simp)
|
||||
apply (case_tac x, simp_all add: in_throwError)
|
||||
apply (clarsimp simp: in_throwError split: sum.splits)
|
||||
apply (drule x)
|
||||
apply (clarsimp simp: validE_def valid_def)
|
||||
apply (drule(1) bspec, simp)
|
||||
apply (drule(1) bspec, simp split: sum.splits)
|
||||
done
|
||||
|
||||
lemma validE_R_abstract_rv:
|
||||
|
@ -687,9 +684,8 @@ lemma valid_rv_split:
|
|||
lemma hoare_rv_split:
|
||||
"\<lbrakk>\<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. rv \<longrightarrow> (Q rv s)\<rbrace>; \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. (\<not>rv) \<longrightarrow> (Q rv s)\<rbrace>\<rbrakk>
|
||||
\<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>"
|
||||
apply (clarsimp simp: valid_def)
|
||||
apply (case_tac a, fastforce+)
|
||||
done
|
||||
apply (clarsimp simp: valid_def split_def)
|
||||
by (metis (full_types) fst_eqD snd_conv)
|
||||
|
||||
lemma combine_validE:
|
||||
"\<lbrakk> \<lbrace> P \<rbrace> x \<lbrace> Q \<rbrace>,\<lbrace> E \<rbrace>; \<lbrace> P' \<rbrace> x \<lbrace> Q' \<rbrace>,\<lbrace> E' \<rbrace> \<rbrakk>
|
||||
|
@ -718,23 +714,19 @@ lemma validE_pre_satisfies_post:
|
|||
|
||||
lemma hoare_validE_R_conjI:
|
||||
"\<lbrakk> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>, - ; \<lbrace>P\<rbrace> f \<lbrace>Q'\<rbrace>, - \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>, -"
|
||||
apply (clarsimp simp: Ball_def validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: Ball_def validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma hoare_validE_E_conjI:
|
||||
"\<lbrakk> \<lbrace>P\<rbrace> f -, \<lbrace>Q\<rbrace> ; \<lbrace>P\<rbrace> f -, \<lbrace>Q'\<rbrace> \<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> f -, \<lbrace>\<lambda>rv s. Q rv s \<and> Q' rv s\<rbrace>"
|
||||
apply (clarsimp simp: Ball_def validE_E_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: Ball_def validE_E_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma validE_R_post_conjD1:
|
||||
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>,-"
|
||||
apply (clarsimp simp: validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma validE_R_post_conjD2:
|
||||
"\<lbrace>P\<rbrace> f \<lbrace>\<lambda>r s. Q r s \<and> R r s\<rbrace>,- \<Longrightarrow> \<lbrace>P\<rbrace> f \<lbrace>R\<rbrace>,-"
|
||||
apply (clarsimp simp: validE_R_def validE_def valid_def)
|
||||
by (case_tac a; fastforce)
|
||||
by (fastforce simp: validE_R_def validE_def valid_def split: sum.splits)
|
||||
|
||||
lemma throw_opt_wp[wp]:
|
||||
"\<lbrace>if v = None then E ex else Q (the v)\<rbrace> throw_opt ex v \<lbrace>Q\<rbrace>,\<lbrace>E\<rbrace>"
|
||||
|
|
|
@ -144,7 +144,7 @@ lemma no_fail_assume_pre:
|
|||
"(\<And>s. P s \<Longrightarrow> no_fail P f) \<Longrightarrow> no_fail P f"
|
||||
by (simp add: no_fail_def)
|
||||
|
||||
\<comment> \<open>lemma no_fail_liftM_eq[simp]:
|
||||
\<^cancel>\<open>lemma no_fail_liftM_eq[simp]:
|
||||
"no_fail P (liftM f m) = no_fail P m"
|
||||
by (auto simp: liftM_def no_fail_def bind_def return_def)\<close>
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ lemma no_throw_def':
|
|||
by (clarsimp simp: no_throw_def validE_def2 split_def split: sum.splits)
|
||||
|
||||
|
||||
subsection \<open>no_throw rules\<close>
|
||||
subsection \<open>@{const no_throw} rules\<close>
|
||||
|
||||
lemma no_throw_returnOk[simp]:
|
||||
"no_throw P (returnOk a)"
|
||||
|
|
|
@ -17,7 +17,7 @@ section \<open>Rely-Guarantee Logic\<close>
|
|||
subsection \<open>Validity\<close>
|
||||
|
||||
text \<open>
|
||||
This section defines a Rely_Guarantee logic for partial correctness for
|
||||
This section defines a Rely-Guarantee logic for partial correctness for
|
||||
the interference trace monad.
|
||||
|
||||
The logic is defined semantically. Rules work directly on the
|
||||
|
@ -36,7 +36,7 @@ text \<open>
|
|||
trace and no successful results then the quintuple is trivially valid. This
|
||||
means @{term "assert P"} does not require us to prove that @{term P} holds,
|
||||
but rather allows us to assume @{term P}! Proving non-failure is done via a
|
||||
separate predicate and calculus (see Trace_No_Fail).\<close>
|
||||
separate predicate and calculus (see @{text Trace_No_Fail}).\<close>
|
||||
|
||||
primrec trace_steps :: "(tmid \<times> 's) list \<Rightarrow> 's \<Rightarrow> (tmid \<times> 's \<times> 's) set" where
|
||||
"trace_steps (elem # trace) s0 = {(fst elem, s0, snd elem)} \<union> trace_steps trace (snd elem)"
|
||||
|
@ -56,7 +56,7 @@ next
|
|||
done
|
||||
qed
|
||||
|
||||
text \<open>rg_pred type: Rely-Guaranty predicates (state before => state after => bool)\<close>
|
||||
text \<open>@{text rg_pred} type: Rely-Guaranty predicates (@{text "state before => state after => bool"})\<close>
|
||||
type_synonym 's rg_pred = "'s \<Rightarrow> 's \<Rightarrow> bool"
|
||||
|
||||
text \<open>Abbreviations for trivial postconditions (taking three arguments):\<close>
|
||||
|
@ -272,8 +272,8 @@ proof (induct n arbitrary: res)
|
|||
case 0 then show ?case by auto
|
||||
next
|
||||
case (Suc n)
|
||||
have drop_1: "\<And>tr res. (tr, res) \<in> f s \<Longrightarrow> \<exists>res'. (drop 1 tr, res') \<in> f s"
|
||||
by (case_tac tr; fastforce dest: prefix_closedD[rotated] intro: Suc)
|
||||
have drop_1: "(tr, res) \<in> f s \<Longrightarrow> \<exists>res'. (drop 1 tr, res') \<in> f s" for tr res
|
||||
by (cases tr; fastforce dest: prefix_closedD[rotated] intro: Suc)
|
||||
show ?case
|
||||
using Suc.hyps[OF Suc.prems]
|
||||
by (metis drop_1[simplified] drop_drop add_0 add_Suc)
|
||||
|
@ -291,7 +291,8 @@ lemma parallel_prefix_closed[wp_split]:
|
|||
"\<lbrakk>prefix_closed f; prefix_closed g\<rbrakk>
|
||||
\<Longrightarrow> prefix_closed (parallel f g)"
|
||||
apply (subst prefix_closed_def, clarsimp simp: parallel_def)
|
||||
apply (case_tac f_steps; clarsimp)
|
||||
apply (subst (asm) zip.zip_Cons)
|
||||
apply (clarsimp split: list.splits)
|
||||
apply (drule(1) prefix_closedD)+
|
||||
apply fastforce
|
||||
done
|
||||
|
@ -338,7 +339,7 @@ lemma guar_cond_drop_Suc:
|
|||
"\<lbrakk>guar_cond R s0 (drop (Suc n) xs);
|
||||
fst (xs ! n) \<noteq> Env \<longrightarrow> R (last_st_tr (drop (Suc n) xs) s0) (snd (xs ! n))\<rbrakk>
|
||||
\<Longrightarrow> guar_cond R s0 (drop n xs)"
|
||||
by (case_tac "n < length xs"; simp add: guar_cond_drop_Suc_eq)
|
||||
by (cases "n < length xs"; simp add: guar_cond_drop_Suc_eq)
|
||||
|
||||
lemma rely_cond_Cons_eq:
|
||||
"rely_cond R s0 (x # xs)
|
||||
|
@ -427,8 +428,9 @@ proof -
|
|||
hd_drop_conv_nth hd_append)
|
||||
apply (fastforce simp: split_def intro!: nth_equalityI)
|
||||
apply clarsimp
|
||||
apply (erule_tac x=n in meta_allE)+
|
||||
apply (drule meta_mp, erule rely_cond_is_drop, simp)
|
||||
apply clarsimp
|
||||
apply (erule meta_allE, drule meta_mp, assumption)+
|
||||
apply (subst(asm) rely_cond_drop_Suc_eq[where xs="map f xs" for f xs], simp)
|
||||
apply (clarsimp simp: last_st_tr_drop_map_zip_hd if_split[where P="\<lambda>x. x = Env"]
|
||||
split_def)
|
||||
|
@ -493,7 +495,7 @@ lemma put_trace_res:
|
|||
\<Longrightarrow> \<exists>n. tr = drop n xs \<and> n \<le> length xs
|
||||
\<and> res = (case n of 0 \<Rightarrow> Result ((), s) | _ \<Rightarrow> Incomplete)"
|
||||
apply (clarsimp simp: put_trace_eq_drop)
|
||||
apply (case_tac n; auto intro: exI[where x=0])
|
||||
apply (auto simp: gr0_conv_Suc intro: exI[where x=0])
|
||||
done
|
||||
|
||||
lemma put_trace_twp[wp]:
|
||||
|
@ -732,13 +734,16 @@ lemmas modify_prefix_closed[simp] =
|
|||
modify_wp[THEN valid_validI_wp[OF no_trace_all(3)], THEN validI_prefix_closed]
|
||||
lemmas await_prefix_closed[simp] = Await_sync_twp[THEN validI_prefix_closed]
|
||||
|
||||
lemma repeat_n_prefix_closed[intro!]:
|
||||
"prefix_closed f \<Longrightarrow> prefix_closed (repeat_n n f)"
|
||||
apply (induct n; simp)
|
||||
apply (auto intro: prefix_closed_bind)
|
||||
done
|
||||
|
||||
lemma repeat_prefix_closed[intro!]:
|
||||
"prefix_closed f \<Longrightarrow> prefix_closed (repeat f)"
|
||||
apply (simp add: repeat_def)
|
||||
apply (rule prefix_closed_bind; clarsimp)
|
||||
apply (rename_tac n)
|
||||
apply (induct_tac n; simp)
|
||||
apply (auto intro: prefix_closed_bind)
|
||||
done
|
||||
|
||||
lemma rely_cond_True[simp]:
|
||||
|
|
|
@ -11,7 +11,7 @@ theory Trace_Total
|
|||
imports Trace_No_Fail
|
||||
begin
|
||||
|
||||
section \<open>Total correctness for Trace_Monad and Trace_Monad with exceptions\<close>
|
||||
section \<open>Total correctness for @{text Trace_Monad} and @{text Trace_Monad} with exceptions\<close>
|
||||
|
||||
subsection Definitions
|
||||
|
||||
|
|
|
@ -33,7 +33,7 @@ text \<open>
|
|||
computation is the empty set then the triple is trivially valid. This means
|
||||
@{term "assert P"} does not require us to prove that @{term P} holds, but
|
||||
rather allows us to assume @{term P}! Proving non-failure is done via a
|
||||
separate predicate and calculus (see Trace_No_Fail).\<close>
|
||||
separate predicate and calculus (see @{text Trace_No_Fail}).\<close>
|
||||
definition valid ::
|
||||
"('s \<Rightarrow> bool) \<Rightarrow> ('s,'a) tmonad \<Rightarrow> ('a \<Rightarrow> 's \<Rightarrow> bool) \<Rightarrow> bool"
|
||||
("\<lbrace>_\<rbrace>/ _ /\<lbrace>_\<rbrace>") where
|
||||
|
@ -1016,7 +1016,7 @@ lemmas liftME_E_E_wp[wp_split] = validE_validE_E [OF liftME_wp, simplified, OF v
|
|||
lemma assert_opt_wp:
|
||||
"\<lbrace>\<lambda>s. x \<noteq> None \<longrightarrow> Q (the x) s\<rbrace> assert_opt x \<lbrace>Q\<rbrace>"
|
||||
unfolding assert_opt_def
|
||||
by (case_tac x; wpsimp wp: fail_wp return_wp)
|
||||
by (cases x; wpsimp wp: fail_wp return_wp)
|
||||
|
||||
lemma gets_the_wp:
|
||||
"\<lbrace>\<lambda>s. (f s \<noteq> None) \<longrightarrow> Q (the (f s)) s\<rbrace> gets_the f \<lbrace>Q\<rbrace>"
|
||||
|
|
|
@ -26,7 +26,7 @@ text \<open>
|
|||
schematic does not have as parameters.
|
||||
|
||||
In the "constructor expression" case, we let users supply additional
|
||||
constructor handlers via the `datatype_schematic` attribute. The method uses
|
||||
constructor handlers via the @{text "datatype_schematic"} attribute. The method uses
|
||||
rules of the following form:
|
||||
|
||||
@{term "\<And>x1 x2 x3. getter (constructor x1 x2 x3) = x2"}
|
||||
|
@ -69,18 +69,18 @@ structure Datatype_Schematic_Data = Generic_Data
|
|||
(
|
||||
\<comment> \<open>
|
||||
Keys are names of datatype constructors (like @{const Cons}), values are
|
||||
`(index, function_name, thm)`.
|
||||
@{text "(index, function_name, thm)"}.
|
||||
|
||||
- `function_name` is the name of an "accessor" function that accesses part
|
||||
- @{text function_name} is the name of an "accessor" function that accesses part
|
||||
of the constructor specified by the key (so the accessor @{const hd} is
|
||||
related to the constructor/key @{const Cons}).
|
||||
|
||||
- `thm` is a theorem showing that the function accesses one of the
|
||||
- @{text thm} is a theorem showing that the function accesses one of the
|
||||
arguments to the constructor (like @{thm list.sel(1)}).
|
||||
|
||||
- `idx` is the index of the constructor argument that the accessor
|
||||
accesses. (eg. since `hd` accesses the first argument, `idx = 0`; since
|
||||
`tl` accesses the second argument, `idx = 1`).
|
||||
- @{text idx} is the index of the constructor argument that the accessor
|
||||
accesses. (eg. since @{const hd} accesses the first argument, @{text "idx = 0"}; since
|
||||
@{const tl} accesses the second argument, @{text "idx = 1"}).
|
||||
\<close>
|
||||
type T = ((int * string * thm) list) Symtab.table;
|
||||
val empty = Symtab.empty;
|
||||
|
@ -287,7 +287,7 @@ lemma selectively_exposing_datatype_arugments:
|
|||
notes get_basic_0.simps[datatype_schematic]
|
||||
shows "\<exists>x. \<forall>a b. x (basic a b) = a"
|
||||
apply (rule exI, (rule allI)+)
|
||||
apply datatype_schem \<comment> \<open>Only exposes `a` to the schematic.\<close>
|
||||
apply datatype_schem \<comment> \<open>Only exposes @{text a} to the schematic.\<close>
|
||||
by (rule refl)
|
||||
|
||||
lemma method_handles_primrecs_with_two_constructors:
|
||||
|
|
|
@ -17,10 +17,10 @@ begin
|
|||
|
||||
|
||||
text \<open>
|
||||
Methods for manipulating the post conditions of hoare triples as if they
|
||||
Methods for manipulating the post conditions of Hoare triples as if they
|
||||
were proper subgoals.
|
||||
|
||||
post_asm can be used with the @ attribute to modify existing proofs. Useful
|
||||
@{text post_asm} can be used with the \@ attribute to modify existing proofs. Useful
|
||||
for proving large postconditions in one proof and then subsequently decomposing it.
|
||||
|
||||
\<close>
|
||||
|
@ -98,8 +98,8 @@ end
|
|||
|
||||
|
||||
text \<open>
|
||||
Method (meant to be used with @ as an attribute) used for producing multiple facts out of
|
||||
a single hoare triple with a conjunction in its post condition.
|
||||
Method (meant to be used with \@ as an attribute) used for producing multiple facts out of
|
||||
a single Hoare triple with a conjunction in its post condition.
|
||||
\<close>
|
||||
|
||||
context begin
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
* SPDX-License-Identifier: BSD-2-Clause
|
||||
*)
|
||||
|
||||
section \<open>Weakest Preconditions\<close>
|
||||
|
||||
theory WP
|
||||
imports
|
||||
WP_Pre
|
||||
|
|
|
@ -242,11 +242,13 @@ lemma demo2:
|
|||
\<and> (\<forall>x. I x \<longrightarrow> (case x of None \<Rightarrow> R (Inl 8) | Some y \<Rightarrow> R (Inr y)))
|
||||
\<and> (\<forall>x. I x \<longrightarrow> (case x of None \<Rightarrow> R (Inr 9) | Some y \<Rightarrow> R (Inl (y - 1))))"
|
||||
apply (intro exI conjI[rotated] allI)
|
||||
apply (rename_tac x)
|
||||
apply (case_tac x; simp)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
apply (rename_tac x)
|
||||
apply (case_tac x; simp)
|
||||
apply wpfix
|
||||
apply (rule P)
|
||||
|
|
|
@ -43,7 +43,7 @@ lemma bool_function_four_cases:
|
|||
by (auto simp add: fun_eq_iff all_bool_eq)
|
||||
|
||||
|
||||
text \<open>The ML version of repeat_new is slightly faster than the Eisbach one.\<close>
|
||||
text \<open>The ML version of @{text repeat_new} is slightly faster than the Eisbach one.\<close>
|
||||
|
||||
method_setup repeat_new =
|
||||
\<open>Method.text_closure >> (fn m => fn ctxt => fn facts =>
|
||||
|
@ -116,6 +116,7 @@ private lemma
|
|||
(atomic f (\<lambda>s. A' s \<and> Pres' s) (\<lambda>r s. A r s \<and> Pres r s) B Q' \<Longrightarrow> trip Ts) \<Longrightarrow> trip Ts"
|
||||
apply (erule meta_mp)
|
||||
apply (clarsimp simp: valid_def atomic_def)
|
||||
apply (rename_tac P s r s')
|
||||
apply (drule_tac x=P in spec)
|
||||
apply (drule_tac x=P in meta_spec)
|
||||
apply (drule_tac x=s in spec)+
|
||||
|
@ -142,7 +143,7 @@ private lemma
|
|||
by (simp add: atomic_def )
|
||||
|
||||
text \<open>Decomposing a static term is a waste of time as we know we can lift it
|
||||
out all in one go. Additionally this stops wp_drop_imp from uselessly taking it apart.\<close>
|
||||
out all in one go. Additionally this stops @{text wp_drop_imp} from uselessly taking it apart.\<close>
|
||||
|
||||
|
||||
private definition "static Q = (\<lambda>r s. Q)"
|
||||
|
@ -186,7 +187,7 @@ private lemma
|
|||
private lemma trips_True: "trip True" by (simp add: trip_def)
|
||||
|
||||
|
||||
text \<open>We need to push the quantifiers into the hoare triples.
|
||||
text \<open>We need to push the quantifiers into the Hoare triples.
|
||||
This is an unfortunate bit of manual work, but anything more than 2 levels
|
||||
of nesting is unlikely.\<close>
|
||||
|
||||
|
@ -209,7 +210,7 @@ text \<open>Existentials are hard, and we don't see them often
|
|||
we fail to process the triple and it won't be lifted.
|
||||
|
||||
Some more work here to allow the heuristics to drop any added implications
|
||||
if they're deemed unecessary.\<close>
|
||||
if they're deemed unnecessary.\<close>
|
||||
|
||||
private lemma trips_push_ex1:
|
||||
"trip (\<exists>x. \<lbrace>\<lambda>s. Q s\<rbrace> f \<lbrace>\<lambda>r s. Q' x r s\<rbrace>) \<Longrightarrow>
|
||||
|
@ -269,7 +270,7 @@ private method uses_arg for C :: "'a \<Rightarrow> 'b \<Rightarrow> bool" =
|
|||
determ \<open>(match (C) in "\<lambda>r s. ?discard_r s" (cut) \<Rightarrow> \<open>fail\<close> \<bar> _ \<Rightarrow> \<open>-\<close>)\<close>
|
||||
|
||||
text \<open>Here the "test" constant holds information about the logical context of the atomic postcondition
|
||||
in the original hoare triple. "f" is the function with its arguments, "C" is all the collected
|
||||
in the original Hoare triple. "f" is the function with its arguments, "C" is all the collected
|
||||
premises and "Q" is the atomic postcondition that we want to solve in isolation.
|
||||
|
||||
The method succeeds if the atomic postcondition seems to not depend on its context, i.e.
|
||||
|
@ -305,7 +306,7 @@ private method make_goals methods wp_weak wp_strong tests =
|
|||
|
||||
text \<open>Once all the triples exist we simplify them all in one go to
|
||||
find trivial or self-contradictory rules. This avoids invoking the simplifier
|
||||
once per postcondition. imp_conjL is used to curry our generated implications.
|
||||
once per postcondition. @{thm imp_conjL} is used to curry our generated implications.
|
||||
|
||||
If all the postconditions together are contradictory, the simplifier won't use it to strengthen
|
||||
the postcondition. As an optimization we simply bail out in that case, rather than
|
||||
|
@ -324,7 +325,7 @@ method post_strengthen methods wp_weak wp_strong simp' tests =
|
|||
rule trip_drop,
|
||||
(rule hoare_vcg_prop)?)
|
||||
|
||||
text \<open>The "wpi" named theorem is used to avoid the safety heuristics, effectively
|
||||
text \<open>The @{text wpi} named theorem is used to avoid the safety heuristics, effectively
|
||||
saying that the presence of that postcondition indicates that it should always be lifted.\<close>
|
||||
|
||||
named_theorems wpi
|
||||
|
|
|
@ -12,6 +12,8 @@ imports
|
|||
"HOL-Eisbach.Eisbach_Tools"
|
||||
begin
|
||||
|
||||
section \<open>Creating Schematic Preconditions\<close>
|
||||
|
||||
ML \<open>
|
||||
structure WP_Pre = struct
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
theory More_Numeral_Type
|
||||
imports "Word_Lib.WordSetup"
|
||||
imports "Word_Lib_l4v.WordSetup"
|
||||
begin
|
||||
|
||||
(* This theory provides additional setup for numeral types, which should probably go into
|
||||
|
|
|
@ -18,7 +18,7 @@ imports
|
|||
Oblivious
|
||||
Injection_Handler
|
||||
Monads.Nondet_While_Loop_Rules_Completeness
|
||||
"Word_Lib.Distinct_Prop" (* for distinct_tuple_helper *)
|
||||
"Word_Lib_l4v.Distinct_Prop" (* for distinct_tuple_helper *)
|
||||
Monads.Reader_Option_VCG
|
||||
begin
|
||||
|
||||
|
|
3
lib/ROOT
3
lib/ROOT
|
@ -6,7 +6,7 @@
|
|||
|
||||
chapter Lib
|
||||
|
||||
session Lib (lib) = Word_Lib +
|
||||
session Lib (lib) = Word_Lib_l4v +
|
||||
sessions
|
||||
"HOL-Library"
|
||||
Basics
|
||||
|
@ -67,6 +67,7 @@ session Lib (lib) = Word_Lib +
|
|||
Value_Type
|
||||
Named_Eta
|
||||
Rules_Tac
|
||||
Heap_List
|
||||
|
||||
(* should move to Monads: *)
|
||||
NonDetMonadLemmaBucket
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
|
||||
theory WordSetup (* part of non-AFP Word_Lib *)
|
||||
theory WordSetup (* part of non-AFP Word_Lib_l4v *)
|
||||
imports
|
||||
Word_Lemmas_64_Internal
|
||||
Distinct_Prop
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
|
||||
theory WordSetup (* part of non-AFP Word_Lib *)
|
||||
theory WordSetup (* part of non-AFP Word_Lib_l4v *)
|
||||
imports
|
||||
Word_Lemmas_32_Internal
|
||||
Distinct_Prop
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
|
||||
theory WordSetup (* part of non-AFP Word_Lib *)
|
||||
theory WordSetup (* part of non-AFP Word_Lib_l4v *)
|
||||
imports
|
||||
Word_Lemmas_32_Internal
|
||||
Distinct_Prop
|
|
@ -8,10 +8,10 @@ section \<open>Bitwise Operations on integers\<close>
|
|||
|
||||
theory Bits_Int
|
||||
imports
|
||||
"Word_Lib.Most_significant_bit"
|
||||
"Word_Lib.Least_significant_bit"
|
||||
"Word_Lib.Generic_set_bit"
|
||||
"Word_Lib.Bit_Comprehension"
|
||||
"Word_Lib_l4v.Most_significant_bit"
|
||||
"Word_Lib_l4v.Least_significant_bit"
|
||||
"Word_Lib_l4v.Generic_set_bit"
|
||||
"Word_Lib_l4v.Bit_Comprehension"
|
||||
begin
|
||||
|
||||
subsection \<open>Implicit bit representation of \<^typ>\<open>int\<close>\<close>
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
section "Distinct Proposition"
|
||||
|
||||
theory Distinct_Prop (* part of non-AFP Word_Lib *)
|
||||
theory Distinct_Prop (* part of non-AFP Word_Lib_l4v *)
|
||||
imports
|
||||
Many_More
|
||||
"HOL-Library.Prefix_Order"
|
|
@ -198,35 +198,35 @@ text \<open>
|
|||
|
||||
\<^descr>[Syntax]
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Syntax_Bundles\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Syntax_Bundles\<close>]
|
||||
Bundles to provide alternative syntax for various bit operations.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Hex_Words\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Hex_Words\<close>]
|
||||
Printing word numerals as hexadecimal numerals.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Type_Syntax\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Type_Syntax\<close>]
|
||||
Pretty type-sensitive syntax for cast operations.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_Syntax\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_Syntax\<close>]
|
||||
Specific ASCII syntax for prominent bit operations on word.
|
||||
|
||||
\<^descr>[Proof tools]
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Norm_Words\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Norm_Words\<close>]
|
||||
Rewriting word numerals to normal forms.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Bitwise\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Bitwise\<close>]
|
||||
Method @{method word_bitwise} decomposes word equalities and inequalities into bit propositions.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Bitwise_Signed\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Bitwise_Signed\<close>]
|
||||
Method @{method word_bitwise_signed} decomposes word equalities and inequalities into bit propositions.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_EqI\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_EqI\<close>]
|
||||
Method @{method word_eqI_solve} decomposes word equalities and inequalities into bit propositions.
|
||||
|
||||
\<^descr>[Operations]
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Signed_Division_Word\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Signed_Division_Word\<close>]
|
||||
|
||||
Signed division on word:
|
||||
|
||||
|
@ -234,16 +234,16 @@ text \<open>
|
|||
|
||||
\<^item> @{term [source] \<open>(smod) :: 'a::len word \<Rightarrow> 'a word \<Rightarrow> 'a word\<close>}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Aligned\<close>] \
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Aligned\<close>] \
|
||||
|
||||
\<^item> @{thm [mode=iff] is_aligned_iff_udvd [no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Least_significant_bit\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Least_significant_bit\<close>]
|
||||
|
||||
The least significant bit as an alias:
|
||||
@{thm [mode=iff] lsb_odd [where ?'a = int, no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Most_significant_bit\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Most_significant_bit\<close>]
|
||||
|
||||
The most significant bit:
|
||||
|
||||
|
@ -255,7 +255,7 @@ text \<open>
|
|||
|
||||
\<^item> @{thm [mode=iff] msb_word_iff_bit [no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Bit_Shifts_Infix_Syntax\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Bit_Shifts_Infix_Syntax\<close>]
|
||||
|
||||
Bit shifts decorated with infix syntax:
|
||||
|
||||
|
@ -265,57 +265,57 @@ text \<open>
|
|||
|
||||
\<^item> @{thm Bit_Shifts_Infix_Syntax.sshiftr_def [no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Next_and_Prev\<close>] \
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Next_and_Prev\<close>] \
|
||||
|
||||
\<^item> @{thm word_next_unfold [no_vars]}
|
||||
|
||||
\<^item> @{thm word_prev_unfold [no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Enumeration_Word\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Enumeration_Word\<close>]
|
||||
|
||||
More on explicit enumeration of word types.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.More_Word_Operations\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.More_Word_Operations\<close>]
|
||||
|
||||
Even more operations on word.
|
||||
|
||||
\<^descr>[Types]
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Signed_Words\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Signed_Words\<close>]
|
||||
|
||||
Formal tagging of word types with a \<^text>\<open>signed\<close> marker.
|
||||
|
||||
\<^descr>[Lemmas]
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.More_Word\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.More_Word\<close>]
|
||||
|
||||
More lemmas on words.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_Lemmas\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_Lemmas\<close>]
|
||||
|
||||
More lemmas on words, covering many other theories mentioned here.
|
||||
|
||||
\<^descr>[Words of popular lengths].
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_8\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_8\<close>]
|
||||
|
||||
for 8-bit words.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_16\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_16\<close>]
|
||||
|
||||
for 16-bit words.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_32\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_32\<close>]
|
||||
|
||||
for 32-bit words.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_64\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_64\<close>]
|
||||
|
||||
for 64-bit words. This theory is not part of \<^text>\<open>Word_Lib_Sumo\<close>, because it shadows
|
||||
names from \<^theory>\<open>Word_Lib.Word_32\<close>. They can be used together, but then will have
|
||||
names from \<^theory>\<open>Word_Lib_l4v.Word_32\<close>. They can be used together, but then will have
|
||||
to use qualified names in applications.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Machine_Word_32\<close> and \<^theory>\<open>Word_Lib.Machine_Word_64\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Machine_Word_32\<close> and \<^theory>\<open>Word_Lib_l4v.Machine_Word_64\<close>]
|
||||
|
||||
provide lemmas for 32-bit words and 64-bit words under the same name,
|
||||
which can help to organize applications relying on some form
|
||||
|
@ -343,36 +343,36 @@ text \<open>
|
|||
from those theories. However theorem coverage may still
|
||||
be terse in some cases.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Word_Lib_Sumo\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Word_Lib_Sumo\<close>]
|
||||
|
||||
An entry point importing any relevant theory in that session. Intended
|
||||
for backward compatibility: start importing this theory when
|
||||
migrating applications to Isabelle2021, and later sort out
|
||||
what you really need. You may need to include
|
||||
\<^theory>\<open>Word_Lib.Word_64\<close> separately.
|
||||
\<^theory>\<open>Word_Lib_l4v.Word_64\<close> separately.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Generic_set_bit\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Generic_set_bit\<close>]
|
||||
|
||||
Kind of an alias: @{thm set_bit_eq [no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Typedef_Morphisms\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Typedef_Morphisms\<close>]
|
||||
|
||||
A low-level extension to HOL typedef providing
|
||||
conversions along type morphisms. The @{method transfer} method
|
||||
seems to be sufficient for most applications though.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Bit_Comprehension\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Bit_Comprehension\<close>]
|
||||
|
||||
Comprehension syntax for bit values over predicates
|
||||
\<^typ>\<open>nat \<Rightarrow> bool\<close>, for \<^typ>\<open>'a::len word\<close>; straightforward
|
||||
alternatives exist.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Bit_Comprehension_Int\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Bit_Comprehension_Int\<close>]
|
||||
|
||||
Comprehension syntax for bit values over predicates
|
||||
\<^typ>\<open>nat \<Rightarrow> bool\<close>, for \<^typ>\<open>int\<close>; inherently non-computational.
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Reversed_Bit_Lists\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Reversed_Bit_Lists\<close>]
|
||||
|
||||
Representation of bit values as explicit list in
|
||||
\<^emph>\<open>reversed\<close> order.
|
||||
|
@ -383,11 +383,11 @@ text \<open>
|
|||
|
||||
@{thm [display] horner_sum_bit_eq_take_bit [where ?'a = int, no_vars]}
|
||||
|
||||
\<^descr>[\<^theory>\<open>Word_Lib.Many_More\<close>]
|
||||
\<^descr>[\<^theory>\<open>Word_Lib_l4v.Many_More\<close>]
|
||||
|
||||
Collection of operations and theorems which are kept for backward
|
||||
compatibility and not used in other theories in session \<^text>\<open>Word_Lib\<close>.
|
||||
They are used in applications of \<^text>\<open>Word_Lib\<close>, but should be migrated to there.
|
||||
compatibility and not used in other theories in session \<^text>\<open>Word_Lib_l4v\<close>.
|
||||
They are used in applications of \<^text>\<open>Word_Lib_l4v\<close>, but should be migrated to there.
|
||||
\<close>
|
||||
|
||||
section \<open>Changelog\<close>
|
||||
|
@ -395,31 +395,31 @@ section \<open>Changelog\<close>
|
|||
text \<open>
|
||||
\<^descr>[Changes since AFP 2022] ~
|
||||
|
||||
\<^item> Theory \<^text>\<open>Word_Lib.Ancient_Numeral\<close> has been removed from session.
|
||||
\<^item> Theory \<^text>\<open>Word_Lib_l4v.Ancient_Numeral\<close> has been removed from session.
|
||||
|
||||
\<^item> Bit comprehension syntax for \<^typ>\<open>int\<close> moved to separate theory
|
||||
\<^theory>\<open>Word_Lib.Bit_Comprehension_Int\<close>.
|
||||
\<^theory>\<open>Word_Lib_l4v.Bit_Comprehension_Int\<close>.
|
||||
|
||||
\<^descr>[Changes since AFP 2021] ~
|
||||
|
||||
\<^item> Theory \<^text>\<open>Word_Lib.Ancient_Numeral\<close> is not part of \<^theory>\<open>Word_Lib.Word_Lib_Sumo\<close>
|
||||
\<^item> Theory \<^text>\<open>Word_Lib_l4v.Ancient_Numeral\<close> is not part of \<^theory>\<open>Word_Lib_l4v.Word_Lib_Sumo\<close>
|
||||
any longer.
|
||||
|
||||
\<^item> Infix syntax for \<^term>\<open>(AND)\<close>, \<^term>\<open>(OR)\<close>, \<^term>\<open>(XOR)\<close> organized in
|
||||
syntax bundle \<^bundle>\<open>bit_operations_syntax\<close>.
|
||||
|
||||
\<^item> Abbreviation \<^abbrev>\<open>max_word\<close> moved from distribution into theory
|
||||
\<^theory>\<open>Word_Lib.Legacy_Aliases\<close>.
|
||||
\<^theory>\<open>Word_Lib_l4v.Legacy_Aliases\<close>.
|
||||
|
||||
\<^item> Operation \<^const>\<open>test_bit\<close> replaced by input abbreviation \<^abbrev>\<open>test_bit\<close>.
|
||||
|
||||
\<^item> Abbreviations \<^abbrev>\<open>bin_nth\<close>, \<^abbrev>\<open>bin_last\<close>, \<^abbrev>\<open>bin_rest\<close>,
|
||||
\<^abbrev>\<open>bintrunc\<close>, \<^abbrev>\<open>sbintrunc\<close>, \<^abbrev>\<open>norm_sint\<close>,
|
||||
\<^abbrev>\<open>bin_cat\<close> moved into theory \<^theory>\<open>Word_Lib.Legacy_Aliases\<close>.
|
||||
\<^abbrev>\<open>bin_cat\<close> moved into theory \<^theory>\<open>Word_Lib_l4v.Legacy_Aliases\<close>.
|
||||
|
||||
\<^item> Operations \<^abbrev>\<open>bshiftr1\<close>,
|
||||
\<^abbrev>\<open>setBit\<close>, \<^abbrev>\<open>clearBit\<close> moved from distribution into theory
|
||||
\<^theory>\<open>Word_Lib.Legacy_Aliases\<close> and replaced by input abbreviations.
|
||||
\<^theory>\<open>Word_Lib_l4v.Legacy_Aliases\<close> and replaced by input abbreviations.
|
||||
|
||||
\<^item> Operations \<^const>\<open>shiftl1\<close>, \<^const>\<open>shiftr1\<close>, \<^const>\<open>sshiftr1\<close>
|
||||
moved here from distribution.
|
|
@ -5,7 +5,7 @@
|
|||
*)
|
||||
|
||||
|
||||
theory WordSetup (* part of non-AFP Word_Lib *)
|
||||
theory WordSetup (* part of non-AFP Word_Lib_l4v *)
|
||||
imports
|
||||
Word_Lemmas_64_Internal
|
||||
Distinct_Prop
|
|
@ -6,8 +6,8 @@
|
|||
|
||||
chapter Lib
|
||||
|
||||
session Word_Lib (lib) = HOL +
|
||||
options [timeout = 300, document=pdf]
|
||||
session Word_Lib_l4v (lib) = HOL +
|
||||
options [timeout = 900, document=pdf]
|
||||
sessions
|
||||
"HOL-Library"
|
||||
"HOL-Eisbach"
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue