Commented ProgramDerivation, with chapter renumbering in Coq code

Adam Chlipala 2018-05-06 12:53:49 -04:00
parent 5f981335d9
commit a8239e7925
5 changed files with 172 additions and 29 deletions

ConcurrentSeparationLogic.v

@@ -1,5 +1,5 @@
(** Formal Reasoning About Programs <http://adam.chlipala.net/frap/>
- * Chapter 17: Concurrent Separation Logic
+ * Chapter 18: Concurrent Separation Logic
 * Author: Adam Chlipala
 * License: https://creativecommons.org/licenses/by-nc-nd/4.0/ *)

MessagesAndRefinement.v

@@ -1,5 +1,5 @@
(** Formal Reasoning About Programs <http://adam.chlipala.net/frap/>
- * Chapter 18: Process Algebra and Behavioral Refinement
+ * Chapter 19: Process Algebra and Behavioral Refinement
 * Author: Adam Chlipala
 * License: https://creativecommons.org/licenses/by-nc-nd/4.0/ *)

ProgramDerivation.v

@@ -4,7 +4,7 @@
 * License: https://creativecommons.org/licenses/by-nc-nd/4.0/
 * Some material borrowed from Fiat <http://plv.csail.mit.edu/fiat/> *)
-Require Import FrapWithoutSets.
+Require Import Frap.
Require Import Program Setoids.Setoid Classes.RelationClasses Classes.Morphisms Morphisms_Prop.
Require Import Eqdep.
@@ -14,17 +14,49 @@ Ltac inv_pair :=
end.
(* We have generally focused so far on proving that programs meet
* specifications. What if we could generate programs from their
* specifications, in ways that guarantee correctness? Let's explore that
* direction, in the tradition of *program derivation* via
* *stepwise refinement*. *)
(** * The computation monad *)
(* One counterintuitive design choice will be to represent specifications and
* implementations in the same "language," which is essentially Gallina with the
* added ability to pick elements nondeterministically from arbitrary sets. *)
(* Specifically, a process producing type [A] is represented as [comp A]. *)
Definition comp (A : Type) := A -> Prop.
(* The computation is represented by the set of legal values it might
* generate. *)
(* Computations form a monad, with the following two operators. *)
Definition ret {A} (x : A) : comp A := eq x.
(* Note how [eq x] is one way of writing "the singleton set of [x]," using
* partial application of the two-argument equality predicate [eq]. *)
Definition bind {A B} (c1 : comp A) (c2 : A -> comp B) : comp B :=
fun b => exists a, c1 a /\ c2 a b.
(* As in some of our earlier examples, [bind] is for sequencing one computation
* after another. For this monad, existential quantification provides a natural
* explanation of sequencing. *)
Definition pick_ {A} (P : A -> Prop) : comp A := P.
(* Here is a convenient wrapper function for injecting an arbitrary set into
* [comp]. This operator stands for nondeterministic choice of any value in the
* set. *)
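(* A small illustrative check, not in the original file (the name
 * [bind_pick_demo] is ad hoc): the computation that picks any number at least
 * 2 and then returns its successor can produce the value 3, witnessed by
 * picking 2. *)
Example bind_pick_demo : bind (pick_ (fun n => n >= 2)) (fun x => ret (x + 1)) 3.
Proof.
  unfold bind, pick_, ret.
  exists 2; split; linear_arithmetic.
Qed.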
(* Here is the correctness condition, for when [c2] implements [c1]. From left
* to right in the operands of [refine], we move closer to a concrete
* implementation. *)
Definition refine {A} (c1 c2 : comp A) :=
forall a, c2 a -> c1 a.
(* Note how this definition is just subset inclusion, in the right direction. *)
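(* A tiny illustration, not in the original file (the name [refine_shrink] is
 * ad hoc): reading [refine] as subset inclusion, shrinking the set of allowed
 * answers is always a legal refinement step. *)
Example refine_shrink : refine (pick_ (fun n => n >= 1)) (pick_ (fun n => n >= 2)).
Proof.
  unfold refine, pick_; simplify; linear_arithmetic.
Qed.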
(* Next, we use Coq's *setoid* feature to declare compatibility of our
* definitions with the [rewrite] tactic. See the Coq manual on setoids for
* background on what we are doing and why. *)
Ltac morphisms := unfold refine, impl, pointwise_relation, bind, ret, pick_; hnf; first_order; subst; eauto.
@@ -58,12 +90,19 @@ Proof.
morphisms.
Qed.
(** ** OK, back to the details we want to focus on. *)
(* Here we have one of the monad laws, showing how traditional computational
* reduction is compatible with refinement. *)
Theorem bind_ret : forall A B (v : A) (c2 : A -> comp B),
refine (bind (ret v) c2) (c2 v).
Proof.
morphisms.
Qed.
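(* A concrete instance of the law, not in the original file (the name
 * [bind_ret_demo] is ad hoc): sequencing [ret true] into a function that
 * negates its input refines to just returning [false], i.e. to what the
 * left-hand side reduces to. *)
Example bind_ret_demo : refine (bind (ret true) (fun b => ret (negb b))) (ret false).
Proof.
  exact (bind_ret bool bool true (fun b => ret (negb b))).
Qed.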
(* Here's an example specific to this monad. One way to resolve a
* nondeterministic pick from a set is to replace it with a specific element
* from the set. *)
Theorem pick_one : forall {A} {P : A -> Prop} v,
P v
-> refine (pick_ P) (ret v).
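(* An illustrative use of [pick_one], not in the original file (the name
 * [pick_one_demo] is ad hoc): the nondeterministic "pick any number greater
 * than 10" may be resolved to the concrete answer 11. *)
Example pick_one_demo : refine (pick_ (fun n => n > 10)) (ret 11).
Proof.
  apply pick_one; cbv beta; linear_arithmetic.
Qed.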
@@ -77,6 +116,8 @@ Notation "x <- c1 ; c2" := (bind c1 (fun x => c2)) (at level 81, right associati
(** * Picking a number not in a list *)
(* Let's illustrate the big idea with an example derivation. *)
(* A specification of what it means to choose a number that is not in a
* particular list *)
Definition notInList (ls : list nat) :=
@@ -125,6 +166,8 @@ Proof.
linear_arithmetic.
Qed.
(** ** Interlude: defining some tactics for key parts of derivation *)
(* We run this next step to hide an evar, so that rewriting isn't too eager to
* make up values for it. *)
Ltac hide_evars :=
@@ -134,9 +177,12 @@ Ltac hide_evars :=
| [ |- refine _ ?f ] => set f
end.
(* This tactic starts a script that finds a term to refine another. *)
Ltac begin :=
eexists; simplify; hide_evars.
(* This tactic ends such a derivation, undoing the effect of
* [hide_evars]. *)
Ltac finish :=
match goal with
| [ |- refine ?e (?f ?arg1 ?arg2) ] =>
@@ -158,24 +204,24 @@ Ltac finish :=
unify f' e; reflexivity
end.
(** ** Back to the example *)
(* Let's derive an efficient implementation. *)
-Theorem implementation : { f : list nat -> comp nat | forall ls, refine (notInList ls) (f ls) }.
+Theorem implementation : sig (fun f : list nat -> comp nat => forall ls, refine (notInList ls) (f ls)).
Proof.
begin.
rewrite notInList_decompose.
rewrite listMax_refines.
setoid_rewrite increment_refines.
(* ^-- Different tactic here to let us rewrite under a binder! *)
rewrite bind_ret.
-rewrite increment_refines.
finish.
Defined.
(* We can extract the program that we found as a standalone, executable Gallina
* term. *)
Definition impl := Eval simpl in proj1_sig implementation.
Print impl.
-(* We'll temporarily expose the definition of [max], so we can compute neatly
+(* We'll locally expose the definition of [max], so we can compute neatly
* here. *)
Transparent max.
Eval compute in impl (1 :: 7 :: 8 :: 2 :: 13 :: 6 :: nil).
@@ -183,13 +229,32 @@ Eval compute in impl (1 :: 7 :: 8 :: 2 :: 13 :: 6 :: nil).
(** * Abstract data types (ADTs) *)
(* Stepwise refinement is perhaps most satisfying when used to build objects with multiple
* methods. The specification of such an object is often called an abstract
* data type (ADT), and we studied them (from a verification perspective) in
* module DataAbstraction. Let's see how we can build implementations
* automatically from ADTs. First, some preliminary definitions. *)
(* Every method inhabits this type, where [state] is the type of private state
* for the object. *)
Record method_ {state : Type} := {
MethodName : string;
MethodBody : state -> nat -> comp (state * nat)
}.
(* A body takes the current state as input and produces the new state as output.
* Additionally, we have hardcoded both the parameter type and the return type
* to [nat], for simplicity. The only wrinkle is that a body result is in the
* [comp] monad, to let it mix features from specification and
* implementation. *)
Arguments method_ : clear implicits.
Notation "'method' name [[ self , arg ]] = body" :=
{| MethodName := name;
MethodBody := fun self arg => body |}
(at level 100, self at level 0, arg at level 0).
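(* A throwaway example, not in the original file (the names [ignore_arg] and
 * "ignore" are ad hoc), just to show that the notation builds an ordinary
 * [method_] record: this method ignores its argument, keeps the state, and
 * returns 0. *)
Example ignore_arg : method_ nat :=
  method "ignore"[[self, _]] = ret (self, 0).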
(* Next, this type collects several method definitions, given a list of their
* names. *)
Inductive methods {state : Type} : list string -> Type :=
| MethodsNil : methods []
| MethodsCons : forall (m : method_ state) {names},
@@ -198,16 +263,15 @@ Inductive methods {state : Type} : list string -> Type :=
Arguments methods : clear implicits.
Notation "'method' name [[ self , arg ]] = body" := (* Finally, the definition of an abstract data type, which will apply to both
{| MethodName := name; * specifications (the classical sense of ADT) and implementations. *)
MethodBody := fun self arg => body |}
(at level 100, self at level 0, arg at level 0).
Record adt {names : list string} := {
AdtState : Type;
AdtConstructor : comp AdtState;
AdtMethods : methods AdtState names
}.
(* An ADT has a state type, one constructor to initialize the state, and a set
* of methods that may read and write the state. *)
Arguments adt : clear implicits.
@@ -219,26 +283,54 @@ Notation "'ADT' { 'rep' = state 'and' 'constructor' = constr ms }" :=
Notation "'and' m1 'and' .. 'and' mn" :=
(MethodsCons m1 (.. (MethodsCons mn MethodsNil) ..)) (at level 101).
(* Here's one quick example, of a counter with duplicate "increment" methods. *)
Definition counter := ADT {
rep = nat
and constructor = ret 0
and method "increment1"[[self, arg]] = ret (self + arg, 0)
and method "increment2"[[self, arg]] = ret (self + arg, 0)
and method "value"[[self, _]] = ret (self, self)
}.
(* This example hasn't used the power of the [comp] monad, but we will get
* there later. *)
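(* A hypothetical method, not in the original file (the names [spec_flavored]
 * and "someUpperBound" are ad hoc), showing the kind of body that does use
 * [comp]'s power: it returns *some* number at least as large as the current
 * state, without committing to which one. *)
Example spec_flavored : method_ nat :=
  method "someUpperBound"[[self, _]] = (r <- pick_ (fun n => n >= self); ret (self, r)).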
(** * ADT refinement *)
(* What does it mean to take sound implementation steps from an ADT toward an
* efficient implementation? We formalize refinement for ADTs as well. The key
* principle will be *simulation*, very similarly to how we used the idea for
* compiler verification. *)
(* For a "before" state type [state1] and an "after" state type [state2], we
* require choice of a simulation relation [R]. This next inductive relation
* captures when all methods are pairwise compatible with [R], between the
* "before" and "after" ADTs. *)
Inductive RefineMethods {state1 state2} (R : state1 -> state2 -> Prop)
: forall {names}, methods state1 names -> methods state2 names -> Prop :=
| RmNil : RefineMethods R MethodsNil MethodsNil
| RmCons : forall name names (f1 : state1 -> nat -> comp (state1 * nat))
(f2 : state2 -> nat -> comp (state2 * nat))
(ms1 : methods state1 names) (ms2 : methods state2 names),
(* This condition is the classic "simulation diagram." *)
(forall s1 s2 arg s2' res,
R s1 s2
-> f2 s2 arg (s2', res)
-> exists s1', f1 s1 arg (s1', res)
/\ R s1' s2')
-> RefineMethods R ms1 ms2
-> RefineMethods R (MethodsCons {| MethodName := name; MethodBody := f1 |} ms1)
(MethodsCons {| MethodName := name; MethodBody := f2 |} ms2).
Hint Constructors RefineMethods.
(* When does [adt2] refine [adt1]? When there exists a simulation relation,
* with respect to which the constructors and methods all satisfy the usual
* simulation diagram. *)
Record adt_refine {names} (adt1 adt2 : adt names) : Prop := {
ArRel : AdtState adt1 -> AdtState adt2 -> Prop;
ArConstructors : forall s2,
@@ -248,6 +340,7 @@ Record adt_refine {names} (adt1 adt2 : adt names) : Prop := {
ArMethods : RefineMethods ArRel (AdtMethods adt1) (AdtMethods adt2)
}.
(* We will use this handy tactic to prove ADT refinements. *)
Ltac choose_relation R :=
match goal with
| [ |- adt_refine ?a ?b ] => apply (Build_adt_refine _ a b R)
@@ -255,13 +348,8 @@ Ltac choose_relation R :=
(** ** Example: numeric counter *)
-Definition counter := ADT {
-rep = nat
-and constructor = ret 0
-and method "increment1"[[self, arg]] = ret (self + arg, 0)
-and method "increment2"[[self, arg]] = ret (self + arg, 0)
-and method "value"[[self, _]] = ret (self, self)
-}.
(* Let's refine the previous counter spec into an implementation that maintains
* two separate counters and adds them on demand. *)
Definition split_counter := ADT {
rep = nat * nat
@@ -273,6 +361,7 @@ Definition split_counter := ADT {
Hint Extern 1 (@eq nat _ _) => simplify; linear_arithmetic.
(* Here is why the new implementation is correct. *)
Theorem split_counter_ok : adt_refine counter split_counter.
Proof.
choose_relation (fun n p => n = fst p + snd p).
@@ -292,6 +381,8 @@ Qed.
(** * General refinement strategies *)
(* ADT refinement forms a preorder, as the next two theorems show. *)
Lemma RefineMethods_refl : forall state names (ms : methods state names),
RefineMethods eq ms ms.
Proof.
@@ -346,6 +437,13 @@ Proof.
first_order.
Qed.
(* Note the use of relation composition for [refine_trans]. *)
(** ** Refining constructors *)
(* One way to refine an ADT is to perform [comp]-level refinement within its
* constructor definition. *)
Theorem refine_constructor : forall names state constr1 constr2 (ms : methods _ names),
refine constr1 constr2
-> adt_refine {| AdtState := state;
@@ -359,6 +457,13 @@ Proof.
choose_relation (@eq state); eauto.
Qed.
(** ** Refining methods *)
(* Conceptually quite similar, refining within methods requires more syntactic
* framework. *)
(* This relation captures the process of replacing [oldbody] of method [name]
* with [newbody]. *)
Inductive ReplaceMethod {state} (name : string)
(oldbody newbody : state -> nat -> comp (state * nat))
: forall {names}, methods state names -> methods state names -> Prop :=
@@ -373,7 +478,9 @@ Inductive ReplaceMethod {state} (name : string)
(MethodsCons {| MethodName := name'; MethodBody := oldbody' |} ms1)
(MethodsCons {| MethodName := name'; MethodBody := oldbody' |} ms2).
(* Let's skip ahead to the next [Theorem]. *)
-Theorem ReplaceMethod_ok : forall state name
+Lemma ReplaceMethod_ok : forall state name
(oldbody newbody : state -> nat -> comp (state * nat))
names (ms1 ms2 : methods state names),
(forall s arg, refine (oldbody s arg) (newbody s arg))
@@ -391,6 +498,7 @@ Qed.
Hint Resolve ReplaceMethod_ok.
(* It is OK to replace a method body if the new refines the old as a [comp]. *)
Theorem refine_method : forall state name (oldbody newbody : state -> nat -> comp (state * nat))
names (ms1 ms2 : methods state names) constr,
(forall s arg, refine (oldbody s arg) (newbody s arg))
@@ -406,6 +514,12 @@ Proof.
choose_relation (@eq state); eauto.
Qed.
(** ** Representation changes *)
(* Some of the most interesting refinements select new data structures. That
* is, they pick new state types. Here we formalize that idea in terms of
* existence of an *abstraction function* from the new state type to the old. *)
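(* To make that concrete (an illustration, not in the original file; the name
 * [counter_absfunc] is ad hoc): the two-counter refinement above used the
 * relation [fun n p => n = fst p + snd p], which is exactly the graph of this
 * abstraction function from the new representation to the old one.  The same
 * function reappears as the argument to [refine_rep] in the derivation
 * below. *)
Example counter_absfunc : nat * nat -> nat := fun p => fst p + snd p.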
Inductive RepChangeMethods {state1 state2} (absfunc : state2 -> state1)
: forall {names}, methods state1 names -> methods state2 names -> Prop :=
| RchNil :
@@ -418,6 +532,12 @@ Inductive RepChangeMethods {state1 state2} (absfunc : state2 -> state1)
p <- oldbody (absfunc s) arg;
s' <- pick s' where absfunc s' = fst p;
ret (s', snd p)) |} ms2).
(* Interestingly, we managed to rewrite all method bodies automatically, to be
* compatible with a new data structure! The catch is that our language of
* method bodies is inherently noncomputational. We leave nontrivial work for
* ourselves, in further refinement of method bodies to remove "pick"
* operations. Note how the generic method template above relies on "pick"
* operations to invert abstraction functions. *)
Lemma RepChangeMethods_ok : forall state1 state2 (absfunc : state2 -> state1)
names (ms1 : methods state1 names) (ms2 : methods state2 names),
@@ -461,12 +581,19 @@ Ltac refine_method nam := eapply refine_trans; [ eapply refine_method with (name
| repeat (refine (RepmHead _ _ _ _ _)
|| (refine (RepmSkip _ _ _ _ _ _ _ _ _ _); [ equality | ])) ];
cbv beta; simplify; hide_evars | ].
(* Don't be thrown off by the [refine] tactic used here. It is not related to
* our notion of refinement! See module SubsetTypes for an explanation of
* it. *)
Ltac refine_finish := apply refine_refl.
(** ** Example: the numeric counter again *)
(* Let's generate the two-counter variant through the process of finding a
* proof, in contrast to theorem [split_counter_ok], which started with the full
* code of the transformed ADT. *)
-Definition derived_counter : { counter' | adt_refine counter counter' }.
+Definition derived_counter : sig (adt_refine counter).
unfold counter; eexists.
refine_rep (fun p => fst p + snd p).
@@ -505,15 +632,28 @@ Eval simpl in proj1_sig derived_counter.
(** * Another refinement strategy: introducing a cache (a.k.a. finite differencing) *)
(* Some methods begin life as expensive computations, such that it pays off to
* precompute their values. A generic refinement strategy follows this plan by
* introducing *caches*. *)
(* Here, [name] names a method whose body leaves the state alone and returns the
* result of [func] applied to that state. *)
Inductive CachingMethods {state} (name : string) (func : state -> nat)
: forall {names}, methods state names -> methods (state * nat) names -> Prop :=
| CmNil :
CachingMethods name func MethodsNil MethodsNil
(* Here is how we rewrite [name] itself. We are extending state with an extra
* cache of [func]'s value, so it is legal just to return that cache. *)
| CmCached : forall names (ms1 : methods state names) (ms2 : methods _ names),
CachingMethods name func ms1 ms2
-> CachingMethods name func
(MethodsCons {| MethodName := name; MethodBody := (fun s _ => ret (s, func s)) |} ms1)
(MethodsCons {| MethodName := name; MethodBody := (fun s arg => ret (s, snd s)) |} ms2)
(* Any other method now picks up an obligation to recompute the cache value
* whenever changing the state. We express that recomputation with a pick, to
* be refined later into efficient logic. *)
| CmDefault : forall name' names oldbody (ms1 : methods state names) (ms2 : methods _ names),
name' <> name
-> CachingMethods name func ms1 ms2
@@ -575,6 +715,8 @@ Ltac refine_cache nam := eapply refine_trans; [ eapply refine_cache with (name :
(** ** An example with lists of numbers *)
(* Let's work out an example of caching. *)
Definition sum := fold_right plus 0.
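(* A quick sanity check, not in the original file (the name [sum_demo] is ad
 * hoc): [sum] adds up the elements of a list. *)
Example sum_demo : sum (1 :: 2 :: 3 :: nil) = 6.
Proof.
  reflexivity.
Qed.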
Definition nats := ADT {
@@ -584,7 +726,7 @@ Definition nats := ADT {
and method "sum"[[self, _]] = ret (self, sum self)
}.
-Definition optimized_nats : { nats' | adt_refine nats nats' }.
+Definition optimized_nats : sig (adt_refine nats).
unfold nats; eexists.
refine_cache "sum".

README.md

@@ -27,9 +27,10 @@ The main narrative, also present in the book PDF, presents standard program-proo
* Chapter 13: `DeepAndShallowEmbeddings.v`
* Chapter 14: `SeparationLogic.v`
* Chapter 15: `Connecting.v`
-* Chapter 16: `SharedMemory.v`
-* Chapter 17: `ConcurrentSeparationLogic.v`
-* Chapter 18: `MessagesAndRefinement.v`
+* Chapter 16: `ProgramDerivation.v`
+* Chapter 17: `SharedMemory.v`
+* Chapter 18: `ConcurrentSeparationLogic.v`
* Chapter 19: `MessagesAndRefinement.v`
There are also two supplementary files that are independent of the main narrative, for introducing programming with dependent types, a distinctive Coq feature that we neither use nor recommend for the problem sets, but which many students find interesting (and useful in other contexts).
* `SubsetTypes.v`: a first introduction to dependent types by attaching predicates to normal types (used after `CompilerCorrectness.v` in the last course offering)

SharedMemory.v

@@ -1,5 +1,5 @@
(** Formal Reasoning About Programs <http://adam.chlipala.net/frap/>
- * Chapter 16: Operational Semantics for Shared-Memory Concurrency
+ * Chapter 17: Operational Semantics for Shared-Memory Concurrency
 * Author: Adam Chlipala
 * License: https://creativecommons.org/licenses/by-nc-nd/4.0/ *)