@string{pub-springer     = {Springer}}
@string{pub-springer:adr = {}}
@string{s-lncs           = {LNCS}}
@string{omg              = {Object Management Group}}

@Manual{ wenzel:isabelle-isar:2017,
  title         = {The Isabelle/Isar Reference Manual},
  author        = {Makarius Wenzel},
  year          = 2017,
  note          = {Part of the Isabelle distribution.}
}

@Book{ adler:r:2010,
  abstract      = {Presents a guide to the R computer language, covering such
                   topics as the user interface, packages, syntax, objects,
                   functions, object-oriented programming, data sets, lattice
                   graphics, regression models, and bioconductor.},
  added-at      = {2013-01-10T22:39:38.000+0100},
  address       = {Sebastopol, CA},
  author        = {Adler, Joseph},
  isbn          = {9780596801700, 059680170X},
  keywords      = {R},
  publisher     = {O'Reilly},
  refid         = 432987461,
  title         = {R in a nutshell},
  year          = 2010
}

@InCollection{ wenzel.ea:building:2007,
  abstract      = {We present the generic system framework of Isabelle/Isar
                   underlying recent versions of Isabelle. Among other things,
                   Isar provides an infrastructure for Isabelle plug-ins,
                   comprising extensible state components and extensible syntax
                   that can be bound to tactical ML programs. Thus the
                   Isabelle/Isar architecture may be understood as an extension
                   and refinement of the traditional LCF approach, with
                   explicit infrastructure for building derivative systems. To
                   demonstrate the technical potential of the framework, we
                   apply it to a concrete formal methods tool: the HOL-Z 3.0
                   environment, which is geared towards the analysis of Z
                   specifications and formal proof of forward-refinements.},
  author        = {Makarius Wenzel and Burkhart Wolff},
  booktitle     = {TPHOLs 2007},
  editor        = {Klaus Schneider and Jens Brandt},
  language      = {USenglish},
  acknowledgement={none},
  pages         = {352--367},
  publisher     = pub-springer,
  address       = pub-springer:adr,
  volume        = 4732,
  series        = s-lncs,
  title         = {Building Formal Method Tools in the {Isabelle}/{Isar}
                   Framework},
  doi           = {10.1007/978-3-540-74591-4_26},
  year          = 2007
}

@Misc{ w3c:ontologies:2015,
  title         = {Ontologies},
  organization  = {W3C},
  url           = {https://www.w3.org/standards/semanticweb/ontology},
  internal-note = {key says 2015 but year field says 2018 -- verify intended
                   year},
  year          = 2018
}

@Book{ boulanger:cenelec-50128:2015,
  author        = {Boulanger, Jean-Louis},
  title         = {{CENELEC} 50128 and {IEC} 62279 Standards},
  publisher     = {Wiley-ISTE},
  year          = 2015,
  address       = {Boston},
  note          = {The reference on the standard.}
}

@Booklet{ cc:cc-part3:2006,
  bibkey        = {cc:cc-part3:2006},
  key           = {Common Criteria},
  institution   = {Common Criteria},
  language      = {USenglish},
  month         = sep,
  year          = 2006,
  public        = {yes},
  title         = {Common Criteria for Information Technology Security
                   Evaluation (Version 3.1), {Part} 3: Security assurance
                   components},
  note          = {Available as document
                   \href{http://www.commoncriteriaportal.org/public/files/CCPART3V3.1R1.pdf}
                   {CCMB-2006-09-003}},
  number        = {CCMB-2006-09-003},
  acknowledgement={brucker, 2007-04-24}
}

@Book{ nipkow.ea:isabelle:2002,
  author        = {Tobias Nipkow and Lawrence C. Paulson and Markus Wenzel},
  title         = {Isabelle/HOL---A Proof Assistant for Higher-Order Logic},
  publisher     = pub-springer,
  address       = pub-springer:adr,
  series        = s-lncs,
  volume        = 2283,
  doi           = {10.1007/3-540-45949-9},
  abstract      = {This book is a self-contained introduction to interactive
                   proof in higher-order logic (\acs{hol}), using the proof
                   assistant Isabelle2002. It is a tutorial for potential users
                   rather than a monograph for researchers. The book has three
                   parts. 1. Elementary Techniques shows how to model
                   functional programs in higher-order logic. Early examples
                   involve lists and the natural numbers. Most proofs are two
                   steps long, consisting of induction on a chosen variable
                   followed by the auto tactic. But even this elementary part
                   covers such advanced topics as nested and mutual recursion.
                   2. Logic and Sets presents a collection of lower-level
                   tactics that you can use to apply rules selectively. It also
                   describes Isabelle/\acs{hol}'s treatment of sets, functions
                   and relations and explains how to define sets inductively.
                   One of the examples concerns the theory of model checking,
                   and another is drawn from a classic textbook on formal
                   languages. 3. Advanced Material describes a variety of other
                   topics. Among these are the real numbers, records and
                   overloading. Advanced techniques are described involving
                   induction and recursion. A whole chapter is devoted to an
                   extended example: the verification of a security protocol.},
  year          = 2002,
  acknowledgement={brucker, 2007-02-19},
  bibkey        = {nipkow.ea:isabelle:2002},
  tags          = {noTAG},
  clearance     = {unclassified},
  timestamp     = {2008-05-26}
}

@InProceedings{ wenzel:asynchronous:2014,
  author        = {Makarius Wenzel},
  title         = {Asynchronous User Interaction and Tool Integration in
                   Isabelle/{PIDE}},
  booktitle     = {Interactive Theorem Proving (ITP)},
  pages         = {515--530},
  year          = 2014,
  crossref      = {klein.ea:interactive:2014},
  doi           = {10.1007/978-3-319-08970-6_33},
  timestamp     = {Sun, 21 May 2017 00:18:59 +0200},
  abstract      = {Historically, the LCF tradition of interactive theorem
                   proving was tied to the read-eval-print loop, with
                   sequential and synchronous evaluation of prover commands
                   given on the command-line. This user-interface technology
                   was adequate when R. Milner introduced his LCF proof
                   assistant in the 1970-ies, but it severely limits the
                   potential of current multicore hardware and advanced IDE
                   front-ends. Isabelle/PIDE breaks this loop and retrofits the
                   read-eval-print phases into an asynchronous model of
                   document-oriented proof processing. Instead of feeding a
                   sequence of individual commands into the prover process, the
                   primary interface works via edits over a family of document
                   versions. Execution is implicit and managed by the prover on
                   its own account in a timeless and stateless manner. Various
                   aspects of interactive proof checking are scheduled
                   according to requirements determined by the front-end
                   perspective on the proof document, while making adequate use
                   of the CPU resources on multicore hardware on the back-end.
                   Recent refinements of Isabelle/PIDE provide an explicit
                   concept of asynchronous print functions over existing proof
                   states. This allows to integrate long-running or potentially
                   non-terminating tools into the document-model. Applications
                   range from traditional proof state output (which may consume
                   substantial time in interactive development) to automated
                   provers and dis-provers that report on existing proof
                   document content (e.g. Sledgehammer, Nitpick, Quickcheck in
                   Isabelle/HOL). Moreover, it is possible to integrate query
                   operations via additional GUI panels with separate input and
                   output (e.g. for Sledgehammer or find-theorems). Thus the
                   Prover IDE provides continuous proof processing, augmented
                   by add-on tools that help the user to continue writing
                   proofs.}
}

@Proceedings{ klein.ea:interactive:2014,
  editor        = {Gerwin Klein and Ruben Gamboa},
  title         = {Interactive Theorem Proving - 5th International Conference,
                   {ITP} 2014, Held as Part of the Vienna Summer of Logic,
                   {VSL} 2014, Vienna, Austria, July 14-17, 2014, Proceedings},
  booktitle     = {Interactive Theorem Proving - 5th International Conference,
                   {ITP} 2014, Held as Part of the Vienna Summer of Logic,
                   {VSL} 2014, Vienna, Austria, July 14-17, 2014, Proceedings},
  series        = s-lncs,
  volume        = 8558,
  publisher     = pub-springer,
  year          = 2014,
  doi           = {10.1007/978-3-319-08970-6},
  isbn          = {978-3-319-08969-0}
}

@InProceedings{ bezzecchi.ea:making:2018,
  title         = {Making Agile Development Processes fit for V-style
                   Certification Procedures},
  author        = {Bezzecchi, S. and Crisafulli, P. and Pichot, C. and Wolff,
                   B.},
  booktitle     = {{ERTS'18}},
  abstract      = {We present a process for the development of safety and
                   security critical components in transportation systems
                   targeting a high-level certification (CENELEC 50126/50128,
                   DO 178, CC ISO/IEC 15408). The process adheres to the
                   objectives of an ``agile development'' in terms of
                   evolutionary flexibility and continuous improvement. Yet, it
                   enforces the overall coherence of the development artifacts
                   (ranging from proofs over tests to code) by a particular
                   environment (CVCE). In particular, the validation process is
                   built around a formal development based on the interactive
                   theorem proving system Isabelle/HOL, by linking the business
                   logic of the application to the operating system model, down
                   to code and concrete hardware models thanks to a series of
                   refinement proofs. We apply both the process and its support
                   in CVCE to a case-study that comprises a model of an
                   odometric service in a railway-system with its corresponding
                   implementation integrated in seL4 (a secure kernel for which
                   a comprehensive Isabelle development exists). Novel
                   techniques implemented in Isabelle enforce the coherence of
                   semi-formal and formal definitions with respect to specific
                   certification processes in order to improve their
                   cost-effectiveness.},
  pdf           = {https://www.lri.fr/~wolff/papers/conf/2018erts-agile-fm.pdf},
  year          = 2018,
  series        = {ERTS Conference Proceedings},
  location      = {Toulouse}
}

@Misc{ owl2012,
  title         = {{OWL} 2 Web Ontology Language},
  url           = {https://www.w3.org/TR/owl2-overview/},
  note          = {Document Overview (Second Edition)},
  author        = {{World Wide Web Consortium}}
}

@Misc{ protege,
  title         = {Prot{\'e}g{\'e}},
  url           = {https://protege.stanford.edu},
  year          = 2018
}

@Misc{ cognitum,
  title         = {Fluent Editor},
  url           = {http://www.cognitum.eu/Semantics/FluentEditor/},
  year          = 2018
}

@Misc{ neon,
  title         = {The NeOn Toolkit},
  url           = {http://neon-toolkit.org},
  year          = 2018
}

@Misc{ owlgred,
  title         = {OWLGrEd},
  url           = {http://owlgred.lumii.lv/},
  year          = 2018
}

@Misc{ rontorium,
  title         = {R Language Package for Fluent Editor (rOntorion)},
  url           = {http://www.cognitum.eu/semantics/FluentEditor/rOntorionFE.aspx},
  year          = 2018
}

@InProceedings{ DBLP:conf/mkm/BlanchetteHMN15,
  author        = {Jasmin Christian Blanchette and Maximilian P. L. Haslbeck
                   and Daniel Matichuk and Tobias Nipkow},
  title         = {Mining the Archive of Formal Proofs},
  booktitle     = {Intelligent Computer Mathematics - International Conference,
                   {CICM} 2015, Washington, DC, USA, July 13-17, 2015,
                   Proceedings},
  pages         = {3--17},
  year          = 2015,
  url           = {https://doi.org/10.1007/978-3-319-20615-8_1},
  doi           = {10.1007/978-3-319-20615-8_1},
  timestamp     = {Fri, 02 Nov 2018 09:40:47 +0100},
  biburl        = {https://dblp.org/rec/bib/conf/mkm/BlanchetteHMN15},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}

@InCollection{ brucker.ea:isabelle-ontologies:2018,
  abstract      = {While Isabelle is mostly known as part of Isabelle/HOL (an
                   interactive theorem prover), it actually provides a
                   framework for developing a wide spectrum of applications. A
                   particular strength of the Isabelle framework is the
                   combination of text editing, formal verification, and code
                   generation. Up to now, Isabelle's document preparation
                   system lacks a mechanism for ensuring the structure of
                   different document types (as, e.g., required in
                   certification processes) in general and, in particular,
                   mechanism for linking informal and formal parts of a
                   document. In this paper, we present Isabelle/DOF, a novel
                   Document Ontology Framework on top of Isabelle.
                   Isabelle/DOF allows for conventional typesetting \emph{as
                   well} as formal development. We show how to model document
                   ontologies inside Isabelle/DOF, how to use the resulting
                   meta-information for enforcing a certain document structure,
                   and discuss ontology-specific IDE support.},
  address       = {Heidelberg},
  author        = {Achim D. Brucker and Idir Ait-Sadoune and Paolo Crisafulli
                   and Burkhart Wolff},
  booktitle     = {Conference on Intelligent Computer Mathematics (CICM)},
  doi           = {10.1007/978-3-319-96812-4_3},
  keywords      = {Isabelle/Isar, HOL, Ontologies},
  language      = {USenglish},
  location      = {Hagenberg, Austria},
  volume        = 11006,
  pdf           = {https://www.brucker.ch/bibliography/download/2018/brucker.ea-isabelle-ontologies-2018.pdf},
  publisher     = {Springer-Verlag},
  series        = {Lecture Notes in Computer Science},
  title         = {Using the {Isabelle} Ontology Framework: Linking the Formal
                   with the Informal},
  url           = {https://www.brucker.ch/bibliography/abstract/brucker.ea-isabelle-ontologies-2018},
  year          = 2018
}

@InCollection{ brucker.wolff:isa_def-design-impl:2019,
  abstract      = {DOF is a novel framework for defining ontologies and
                   enforcing them during document development and evolution. A
                   major goal of DOF is the integrated development of formal
                   certification documents (e.g., for Common Criteria or
                   CENELEC 50128) that require consistency across both formal
                   and informal arguments. To support a consistent development
                   of formal and informal parts of a document, we implemented
                   Isabelle/DOF, an implementation of DOF on top of the formal
                   methods framework Isabelle/HOL. A particular emphasis is put
                   on a deep integration into Isabelle's IDE, which allows for
                   smooth ontology development as well as immediate ontological
                   feedback during the editing of a document. In this paper, we
                   give an in-depth presentation of the design concepts of
                   DOF's Ontology Definition Language (ODL) and key aspects of
                   the technology of its implementation. Isabelle/DOF is the
                   first ontology language supporting machine-checked links
                   between the formal and informal parts in an LCF-style
                   interactive theorem proving environment. Sufficiently
                   annotated, large documents can easily be developed
                   collaboratively, while ensuring their consistency, and the
                   impact of changes (in the formal and the semi-formal
                   content) is tracked automatically.},
  address       = {Heidelberg},
  author        = {Achim D. Brucker and Burkhart Wolff},
  booktitle     = {International Conference on Software Engineering and Formal
                   Methods},
  keywords      = {Isabelle/Isar, HOL, Ontologies, Documentation},
  language      = {USenglish},
  location      = {Oslo, Norway},
  note          = {To appear},
  publisher     = {Springer-Verlag},
  series        = {Lecture Notes in Computer Science},
  title         = {{I}sabelle/{DOF}: {D}esign and {I}mplementation},
  year          = 2019
}

@InProceedings{ DBLP:conf/itp/Wenzel14,
  author        = {Makarius Wenzel},
  title         = {Asynchronous User Interaction and Tool Integration in
                   Isabelle/PIDE},
  booktitle     = {Interactive Theorem Proving (ITP)},
  pages         = {515--530},
  year          = 2014,
  doi           = {10.1007/978-3-319-08970-6_33},
  timestamp     = {Sun, 21 May 2017 00:18:59 +0200},
  biburl        = {https://dblp.org/rec/bib/conf/itp/Wenzel14},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  internal-note = {duplicate of wenzel:asynchronous:2014 -- consider
                   consolidating citations onto one key}
}

@InProceedings{ DBLP:journals/corr/Wenzel14,
  author        = {Makarius Wenzel},
  title         = {System description: Isabelle/jEdit in 2014},
  booktitle     = {Proceedings Eleventh Workshop on User Interfaces for Theorem
                   Provers, {UITP} 2014, Vienna, Austria, 17th July 2014},
  pages         = {84--94},
  year          = 2014,
  doi           = {10.4204/EPTCS.167.10},
  timestamp     = {Wed, 03 May 2017 14:47:58 +0200},
  biburl        = {https://dblp.org/rec/bib/journals/corr/Wenzel14},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  internal-note = {duplicate of wenzel:system:2014 -- consider consolidating
                   citations onto one key}
}

@InProceedings{ DBLP:conf/mkm/BarrasGHRTWW13,
  author        = {Bruno Barras and Lourdes Del Carmen
                   Gonz{\'{a}}lez{-}Huesca and Hugo Herbelin and Yann
                   R{\'{e}}gis{-}Gianas and Enrico Tassi and Makarius Wenzel
                   and Burkhart Wolff},
  title         = {Pervasive Parallelism in Highly-Trustable Interactive
                   Theorem Proving Systems},
  booktitle     = {Intelligent Computer Mathematics - MKM, Calculemus, DML, and
                   Systems and Projects},
  pages         = {359--363},
  year          = 2013,
  doi           = {10.1007/978-3-642-39320-4_29},
  timestamp     = {Sun, 04 Jun 2017 10:10:26 +0200},
  biburl        = {https://dblp.org/rec/bib/conf/mkm/BarrasGHRTWW13},
  bibsource     = {dblp computer science bibliography, https://dblp.org}
}

@TechReport{ bsi:50128:2014,
  type          = {Standard},
  key           = {BS EN 50128:2011},
  month         = apr,
  year          = 2014,
  series        = {British Standards Publication},
  title         = {BS EN 50128:2011: Railway applications -- Communication,
                   signalling and processing systems -- Software for railway
                   control and protecting systems},
  institution   = {British Standards Institution (BSI)},
  keywords      = {CENELEC},
  abstract      = {This European Standard is part of a group of related
                   standards. The others are EN 50126-1:1999 "Railway
                   applications -- The specification and demonstration of
                   Reliability, Availability, Maintainability and Safety
                   (RAMS) -- Part 1: Basic requirements and generic process --
                   and EN 50129:2003 "Railway applications -- Communication,
                   signalling and processing systems -- Safety related
                   electronic systems for signalling". EN 50126-1 addresses
                   system issues on the widest scale, while EN 50129 addresses
                   the approval process for individual systems which can exist
                   within the overall railway control and protection system.
                   This European Standard concentrates on the methods which
                   need to be used in order to provide software which meets the
                   demands for safety integrity which are placed upon it by
                   these wider considerations. This European Standard provides
                   a set of requirements with which the development, deployment
                   and maintenance of any safety-related software intended for
                   railway control and protection applications shall comply. It
                   defines requirements concerning organisational structure,
                   the relationship between organisations and division of
                   responsibility involved in the development, deployment and
                   maintenance activities.}
}

@TechReport{ ds:50126-1:2014,
  type          = {Standard},
  key           = {DS/EN 50126-1:1999},
  month         = oct,
  year          = 2014,
  series        = {Dansk standard},
  title         = {EN 50126-1:1999: Railway applications -- The specification
                   and demonstration of Reliability, Availability,
                   Maintainability and Safety (RAMS) -- Part 1: Basic
                   requirements and generic process},
  institution   = {Danish Standards Foundation},
  keywords      = {CENELEC},
  abstract      = {This European Standard provides Railway Authorities and the
                   railway support industry, throughout the European Union,
                   with a process which will enable the implementation of a
                   consistent approach to the management of reliability,
                   availability, maintainability and safety, denoted by the
                   acronym RAMS. Processes for the specification and
                   demonstration of RAMS requirements are cornerstones of this
                   standard. This European Standard aims to promote a common
                   understanding and approach to the management of RAMS. This
                   European Standard can be applied systematically by a railway
                   authority and railway support industry, throughout all
                   phases of the lifecycle of a railway application, to develop
                   railway specific RAMS requirements and to achieve compliance
                   with these requirements. The systems-level approach defined
                   by this European Standard facilitates assessment of the RAMS
                   interactions between elements of complex railway
                   applications. This European Standard promotes co-operation
                   between railway authority and railway support industry,
                   within a variety of procurement strategies, in the
                   achievement of an optimal combination of RAMS and cost for
                   railway applications. Adoption of this European Standard
                   will support the principles of the European Single Market
                   and facilitate European railway inter-operability. The
                   process defined by this European Standard assumes that
                   railway authorities and railway support industry have
                   business-level policies addressing Quality, Performance and
                   Safety. The approach defined in this standard is consistent
                   with the application of quality management requirements
                   contained within the ISO 9000 series of International
                   standards.}
}

@Book{ paulson:ml:1996,
  author        = {Lawrence C. Paulson},
  title         = {{ML} for the Working Programmer},
  publisher     = {Cambridge University Press},
  year          = 1996,
  url           = {http://www.cl.cam.ac.uk/~lp15/MLbook/pub-details.html},
  acknowledgement={none}
}

@Book{ pollak:beginning:2009,
  title         = {Beginning Scala},
  author        = {David Pollak},
  publisher     = {Apress},
  year          = 2009,
  isbn          = {978-1-4302-1989-7}
}

@Article{ klein:operating:2009,
  author        = {Gerwin Klein},
  title         = {Operating System Verification --- An Overview},
  journal       = {S\={a}dhan\={a}},
  publisher     = pub-springer,
  year          = 2009,
  volume        = 34,
  number        = 1,
  month         = feb,
  pages         = {27--69},
  abstract      = {This paper gives a high-level introduction to the topic of
                   formal, interactive, machine-checked software verification
                   in general, and the verification of operating systems code
                   in particular. We survey the state of the art, the
                   advantages and limitations of machine-checked code proofs,
                   and describe two specific ongoing larger-scale verification
                   projects in more detail.}
}

@InProceedings{ wenzel:system:2014,
  author        = {Makarius Wenzel},
  title         = {System description: Isabelle/jEdit in 2014},
  booktitle     = {Workshop on User Interfaces for Theorem Provers, {UITP}},
  pages         = {84--94},
  year          = 2014,
  doi           = {10.4204/EPTCS.167.10},
  timestamp     = {Wed, 12 Sep 2018 01:05:15 +0200},
  editor        = {Christoph Benzm{\"{u}}ller and Bruno {Woltzenlogel Paleo}},
  volume        = 167
}

@InProceedings{ feliachi.ea:circus:2013,
  author        = {Abderrahmane Feliachi and Marie{-}Claude Gaudel and
                   Makarius Wenzel and Burkhart Wolff},
  title         = {The Circus Testing Theory Revisited in Isabelle/HOL},
  booktitle     = {{ICFEM}},
  series        = s-lncs,
  volume        = 8144,
  pages         = {131--147},
  publisher     = pub-springer,
  year          = 2013
}

@Article{ Klein2014,
  author        = {Gerwin Klein and June Andronick and Kevin Elphinstone and
                   Toby C. Murray and Thomas Sewell and Rafal Kolanski and
                   Gernot Heiser},
  title         = {Comprehensive formal verification of an {OS} microkernel},
  journal       = {{ACM} Trans. Comput. Syst.},
  year          = 2014,
  volume        = 32,
  number        = 1,
  pages         = {2:1--2:70},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  biburl        = {https://dblp.org/rec/bib/journals/tocs/KleinAEMSKH14},
  doi           = {10.1145/2560537},
  timestamp     = {Tue, 03 Jan 2017 11:51:57 +0100},
  url           = {http://doi.acm.org/10.1145/2560537}
}

@InProceedings{ bicchierai.ea:using:2013,
  author        = {Bicchierai, Irene and Bucci, Giacomo and Nocentini, Carlo
                   and Vicario, Enrico},
  editor        = {Keller, Hubert B. and Pl{\"o}dereder, Erhard and Dencker,
                   Peter and Klenk, Herbert},
  title         = {Using Ontologies in the Integration of Structural,
                   Functional, and Process Perspectives in the Development of
                   Safety Critical Systems},
  booktitle     = {Reliable Software Technologies -- Ada-Europe 2013},
  year          = 2013,
  publisher     = {Springer Berlin Heidelberg},
  address       = {Berlin, Heidelberg},
  pages         = {95--108},
  abstract      = {We present a systematic approach for the efficient
                   management of the data involved in the development process
                   of safety critical systems, illustrating how the activities
                   performed during the life-cycle can be integrated in a
                   common framework. Information needed in these activities
                   reflects concepts that pertain to three different
                   perspectives: i) structural elements of design and
                   implementation; ii) functional requirements and quality
                   attributes; iii) organization of the overall process. The
                   integration of these concepts may considerably improve the
                   trade-off between reward and effort spent in verification
                   and quality-driven activities.},
  isbn          = {978-3-642-38601-5}
}

@Article{ zhao.ea:formal:2016,
  author        = {Yongwang Zhao and David San{\'{a}}n and Fuyuan Zhang and
                   Yang Liu},
  title         = {Formal Specification and Analysis of Partitioning Operating
                   Systems by Integrating Ontology and Refinement},
  journal       = {{IEEE} Trans. Industrial Informatics},
  volume        = 12,
  number        = 4,
  pages         = {1321--1331},
  year          = 2016,
  abstract      = {Partitioning operating systems (POSs) have been widely
                   applied in safety-critical domains from aerospace to
                   automotive. In order to improve the safety and the
                   certification process of POSs, the ARINC 653 standard has
                   been developed and complied with by the mainstream POSs.
                   Rigorous formalization of ARINC 653 can reveal hidden errors
                   in this standard and provide a necessary foundation for
                   formal verification of POSs and ARINC 653 applications. For
                   the purpose of reusability and efficiency, a novel
                   methodology by integrating ontology and refinement is
                   proposed to formally specify and analyze POSs in this paper.
                   An ontology of POSs is developed as an intermediate model
                   between informal descriptions of ARINC 653 and the formal
                   specification in Event-B. A semiautomatic translation from
                   the ontology and ARINC 653 into Event-B is implemented,
                   which leads to a complete Event-B specification for ARINC
                   653 compliant POSs. During the formal analysis, six hidden
                   errors in ARINC 653 have been discovered and fixed in the
                   Event-B specification. We also validate the existence of
                   these errors in two open-source POSs, i.e., XtratuM and POK.
                   By introducing the ontology, the degree of automatic
                   verification of the Event-B specification reaches a higher
                   level.}
}

@InProceedings{ denney.ea:evidence:2013,
  author        = {E. {Denney} and G. {Pai}},
  booktitle     = {2013 IEEE International Symposium on Software Reliability
                   Engineering Workshops (ISSREW)},
  title         = {Evidence arguments for using formal methods in software
                   certification},
  year          = 2013,
  pages         = {375--380},
  abstract      = {We describe a generic approach for automatically integrating
                   the output generated from a formal method/tool into a
                   software safety assurance case, as an evidence argument, by
                   (a) encoding the underlying reasoning as a safety case
                   pattern, and (b) instantiating it using the data produced
                   from the method/tool. We believe this approach not only
                   improves the trustworthiness of the evidence generated from
                   a formal method/tool, by explicitly presenting the reasoning
                   and mechanisms underlying its genesis, but also provides a
                   way to gauge the suitability of the evidence in the context
                   of the wider assurance case. We illustrate our work by
                   application to a real example-an unmanned aircraft system -
                   where we invoke a formal code analysis tool from its
                   autopilot software safety case, automatically transform the
                   verification output into an evidence argument, and then
                   integrate it into the former.},
  keywords      = {aircraft;autonomous aerial vehicles;formal
                   verification;safety-critical software;evidence
                   arguments;formal methods;software certification;software
                   safety assurance case;safety case pattern;unmanned aircraft
                   system;formal code analysis;autopilot software safety
                   case;verification output;Safety;Software
                   safety;Cognition;Computer
                   architecture;Context;Encoding;Safety cases;Safety case
                   patterns;Formal methods;Argumentation;Software
                   certification},
  doi           = {10.1109/ISSREW.2013.6688924},
  month         = nov
}

@InProceedings{ kaluvuri.ea:quantitative:2014,
  author        = {Kaluvuri, Samuel Paul and Bezzi, Michele and Roudier,
                   Yves},
  editor        = {Eckert, Claudia and Katsikas, Sokratis K. and Pernul,
                   G{\"u}nther},
  title         = {A Quantitative Analysis of Common Criteria Certification
                   Practice},
  booktitle     = {Trust, Privacy, and Security in Digital Business},
  year          = 2014,
  publisher     = {Springer International Publishing},
  address       = {Cham},
  pages         = {132--143},
  abstract      = {The Common Criteria (CC) certification framework defines a
                   widely recognized, multi-domain certification scheme that
                   aims to provide security assurances about IT products to
                   consumers. However, the CC scheme does not prescribe a
                   monitoring scheme for the CC practice, raising concerns
                   about the quality of the security assurance provided by the
                   certification and questions on its usefulness. In this
                   paper, we present a critical analysis of the CC practice
                   that concretely exposes the limitations of current
                   approaches. We also provide directions to improve the CC
                   practice.},
  isbn          = {978-3-319-09770-1}
}

@InProceedings{ ekelhart.ea:ontological:2007,
  author        = {Ekelhart, Andreas and Fenz, Stefan and Goluch, Gernot and
                   Weippl, Edgar},
  editor        = {Venter, Hein and Eloff, Mariki and Labuschagne, Les and
                   Eloff, Jan and von Solms, Rossouw},
  title         = {Ontological Mapping of Common Criteria's Security Assurance
                   Requirements},
  booktitle     = {New Approaches for Security, Privacy and Trust in Complex
                   Environments},
  year          = 2007,
  publisher     = {Springer US},
  address       = {Boston, MA},
  pages         = {85--95},
  abstract      = {The Common Criteria (CC) for Information Technology Security
                   Evaluation provides comprehensive guidelines for the
                   evaluation and certification of IT security regarding data
                   security and data privacy. Due to the very complex and
                   time-consuming certification process a lot of companies
                   abstain from a CC certification. We created the CC Ontology
                   tool, which is based on an ontological representation of the
                   CC catalog, to support the evaluator at the certification
                   process. Tasks such as the planning of an evaluation
                   process, the review of relevant documents or the creating of
                   reports are supported by the CC Ontology tool. With the
                   development of this tool we reduce the time and costs needed
                   to complete a certification.},
  isbn          = {978-0-387-72367-9}
}

@InProceedings{ fenz.ea:formalizing:2009,
  author        = {Fenz, Stefan and Ekelhart, Andreas},
  title         = {Formalizing Information Security Knowledge},
  booktitle     = {Proceedings of the 4th International Symposium on
                   Information, Computer, and Communications Security},
  series        = {ASIACCS '09},
  year          = 2009,
  isbn          = {978-1-60558-394-5},
  location      = {Sydney, Australia},
  pages         = {183--194},
  numpages      = 12,
  url           = {http://doi.acm.org/10.1145/1533057.1533084},
  doi           = {10.1145/1533057.1533084},
  acmid         = 1533084,
  publisher     = {ACM},
  address       = {New York, NY, USA},
  keywords      = {information security, risk management, security ontology},
  abstract      = {Unified and formal knowledge models of the information
                   security domain are fundamental requirements for supporting
                   and enhancing existing risk management approaches. This
                   paper describes a security ontology which provides an
                   ontological structure for information security domain
                   knowledge. Besides existing best-practice guidelines such as
                   the German IT Grundschutz Manual also concrete knowledge of
                   the considered organization is incorporated. An evaluation
                   conducted by an information security expert team has shown
                   that this knowledge model can be used to support a broad
                   range of information security risk management approaches.}
}

@InProceedings{ gleirscher.ea:incremental:2007,
  author        = {M. {Gleirscher} and D. {Ratiu} and B. {Schatz}},
  booktitle     = {2007 International Conference on Systems Engineering and
                   Modeling},
  title         = {Incremental Integration of Heterogeneous Systems Views},
  year          = 2007,
  pages         = {50--59},
  abstract      = {To master systems complexity, their industrial development
                   requires specialized heterogeneous views and techniques and
                   - correspondingly - engineering tools. These views generally
                   cover only parts of the system under development, and
                   critical development defects often occur at the gaps between
                   them. To successfully achieve an integration that bridges
                   these gaps, we must tackle it both from the methodical as
                   well as from the tooling sides. The former requires answers
                   to questions like: What are the views provided by the tools?
                   How are they related and extended to achieve consistency or
                   to form new views? - while the latter requires answers to:
                   How are views extracted from the tools? How are they
                   composed and provided to the user? Our approach, suitable
                   for incremental integration, is demonstrated in the tool
                   integration framework ToolNet.},
  keywords      = {computer aided engineering;computer aided software
                   engineering;software tools;heterogeneous systems
                   views;systems complexity;tool integration
                   framework;ToolNet;engineering tools;Systems engineering and
                   theory;Certification;Integrated circuit
                   modeling;Bridges;Software tools;Computer aided software
                   engineering;Computer aided engineering;Costs;Natural
                   languages;Formal specifications},
  doi           = {10.1109/ICSEM.2007.373334},
  month         = mar
}

@Booklet{ omg:sacm:2018,
  bibkey        = {omg:sacm:2018},
  key           = omg,
  abstract      = {This specification defines a metamodel for representing
                   structured assurance cases. An Assurance Case is a set of
                   auditable claims, arguments, and evidence created to support
                   the claim that a defined system/service will satisfy the
                   particular requirements. An Assurance Case is a document
                   that facilitates information exchange between various system
                   stakeholders such as suppliers and acquirers, and between
                   the operator and regulator, where the knowledge related to
                   the safety and security of the system is communicated in a
                   clear and defendable way. Each assurance case should
                   communicate the scope of the system, the operational
                   context, the claims, the safety and/or security arguments,
                   along with the corresponding evidence.},
  publisher     = omg,
  language      = {USenglish},
  month         = mar,
  keywords      = {SACM},
  topic         = {formalism},
  note          = {Available as OMG document
                   \href{http://www.omg.org/cgi-bin/doc?formal/2018-02-02}
                   {formal/2018-02-02}},
  public        = {yes},
  title         = {Structured Assurance Case Metamodel (SACM)},
  year          = 2018
}

@InProceedings{ kelly.ea:goal:2004,
  title         = {The Goal Structuring Notation -- A Safety Argument
                   Notation},
  booktitle     = {Dependable Systems and Networks},
  year          = 2004,
  month         = jul,
  author        = {Tim Kelly and Rob Weaver}
}

@TechReport{ rushby:formal:1993,
  author        = {John Rushby},
  title         = {Formal Methods and the Certification of Critical Systems},
  institution   = {Computer Science Laboratory, SRI International},
  year          = 1993,
  number        = {SRI-CSL-93-7},
  address       = {Menlo Park, CA},
  note          = {Also issued under the title {\em Formal Methods and Digital
                   Systems Validation for Airborne Systems\/} as NASA
                   Contractor Report 4551, December 1993},
  month         = dec
}

@InProceedings{ greenaway.ea:bridging:2012,
  author        = {Greenaway, David and Andronick, June and Klein, Gerwin},
  editor        = {Beringer, Lennart and Felty, Amy},
  title         = {Bridging the Gap: Automatic Verified Abstraction of C},
  booktitle     = {Interactive Theorem Proving},
  year          = 2012,
  publisher     = {Springer Berlin Heidelberg},
  address       = {Berlin, Heidelberg},
  pages         = {99--115},
  abstract      = {Before low-level imperative code can be reasoned about in an
                   interactive theorem prover, it must first be converted into
                   a logical representation in that theorem prover. Accurate
                   translations of such code should be conservative, choosing
                   safe representations over representations convenient to
                   reason about. This paper bridges the gap between
                   conservative representation and convenient reasoning. We
                   present a tool that automatically abstracts low-level C
                   semantics into higher level specifications, while generating
                   proofs of refinement in Isabelle/HOL for each translation
                   step. The aim is to generate a verified, human-readable
                   specification, convenient for further reasoning.},
  isbn          = {978-3-642-32347-8}
}

@InProceedings{ BCPW2018,
  title         = {Making Agile Development Processes fit for V-style
                   Certification Procedures},
  author        = {Bezzecchi, S. and Crisafulli, P. and Pichot, C. and Wolff,
                   B.},
  booktitle     = {{ERTS'18}},
  abstract      = {We present a process for the development of safety and
                   security critical components in transportation systems
                   targeting a high-level certification (CENELEC 50126/50128,
                   DO 178, CC ISO/IEC 15408). The process adheres to the
                   objectives of an ``agile development'' in terms of
                   evolutionary flexibility and continuous improvement. Yet, it
                   enforces the overall coherence of the development artifacts
                   (ranging from proofs over tests to code) by a particular
                   environment (CVCE). In particular, the validation process is
                   built around a formal development based on the interactive
                   theorem proving system Isabelle/HOL, by linking the business
                   logic of the application to the operating system model, down
                   to code and concrete hardware models thanks to a series of
                   refinement proofs. We apply both the process and its support
                   in CVCE to a case-study that comprises a model of an
                   odometric service in a railway-system with its corresponding
                   implementation integrated in seL4 (a secure kernel for which
                   a comprehensive Isabelle development exists). Novel
                   techniques implemented in Isabelle enforce the coherence of
                   semi-formal and formal definitions with respect to specific
                   certification processes in order to improve their
                   cost-effectiveness.},
  pdf           = {https://www.lri.fr/~wolff/papers/conf/2018erts-agile-fm.pdf},
  year          = 2018,
  series        = {ERTS Conference Proceedings},
  location      = {Toulouse},
  internal-note = {duplicate of bezzecchi.ea:making:2018 -- consider
                   consolidating citations onto one key}
}