Merge branch 'dev' of github.com:plfa/plfa.github.io into dev

Phil merge in Wen's updates
This commit is contained in:
wadler 2019-07-14 13:03:40 -03:00
commit 48445a6606
7 changed files with 206 additions and 150 deletions

1
.gitignore vendored
View file

@ -1,6 +1,7 @@
## Agda files
*.agdai
.agda-stdlib.sed
.links-*.sed
## Jekyll files
_site/

View file

@ -4,7 +4,6 @@ AGDAI := $(shell find . -type f -and \( -path '*/src/*' -or -path '*/courses/*'
MARKDOWN := $(subst courses/,out/,$(subst src/,out/,$(subst .lagda.md,.md,$(AGDA))))
PLFA_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
AGDA2HTML_FLAGS := --verbose --link-to-local-agda-names --use-jekyll=out/
AGDA_STDLIB_SED := ".agda-stdlib.sed"
ifeq ($(AGDA_STDLIB_VERSION),)
AGDA_STDLIB_URL := https://agda.github.io/agda-stdlib/
@ -40,10 +39,10 @@ $$(out) : out = $(subst courses/,out/,$(subst src/,out/,$(subst .lagda.md,.md,$(
$$(out) : $$(in) | out/
@echo "Processing $$(subst ./,,$$(in))"
ifeq (,$$(findstring courses/,$$(in)))
./highlight.sh $$(in)
./highlight.sh $$(in) --include-path=src/
else
# Fix links to the file itself (out/<filename> to out/<filepath>)
./highlight.sh $$(in) --include-path $(realpath src) --include-path $$(realpath $$(dir $$(in)))
./highlight.sh $$(in) --include-path=src/ --include-path=$$(dir $$(in))
@sed -i 's|out/$$(notdir $$(out))|$$(subst ./,,$$(out))|g' $$(out)
endif
endef
@ -85,7 +84,8 @@ build-incremental: $(MARKDOWN)
# Remove all auxiliary files
clean:
rm -f $(AGDA_STDLIB_SED)
rm -f .agda-stdlib.sed
rm -f .links-*.sed
ifneq ($(strip $(AGDAI)),)
rm $(AGDAI)
endif

View file

@ -7,22 +7,31 @@ shift
function out_path {
OUT="$1"
OUT=$(eval "echo \"$OUT\" | sed -e \"s|/src/|/out/|; s|/courses/|/out/|; s|\.lagda\.md|\.md|;\"")
OUT=`echo "$OUT" | sed -e "s|src/|out/|; s|courses/|out/|; s|\.lagda\.md|\.md|;"`
echo "$OUT"
}
OUT="$(out_path $SRC)"
OUT_DIR="$(dirname $OUT)"
# Extract the module name from the Agda file
# NOTE: this fails if there is more than a single space after 'module'
MOD_NAME=`grep -oP -m 1 "(?<=^module )(\\S+)(?=\\s+(\\S+\\s+)*where)" "$SRC"`
function html_path {
SRC="$1"
HTML_DIR="$2"
# Extract the module name from the Agda file
# NOTE: this fails if there is more than a single space after 'module'
MOD_NAME=`grep -oP -m 1 "(?<=^module )(\\S+)(?=\\s+(\\S+\\s+)*where)" "$SRC"`
# Extract the extension from the Agda file
SRC_EXT="$(basename $SRC)"
SRC_EXT="${SRC_EXT##*.}"
HTML="$HTML_DIR/$MOD_NAME.$SRC_EXT"
echo "$HTML"
}
# Create temporary directory and compute path to output of `agda --html`
HTML_DIR="$(mktemp -d)"
SRC_EXT="$(basename $SRC)"
SRC_EXT="${SRC_EXT##*.}"
HTML="$HTML_DIR/$MOD_NAME.$SRC_EXT"
HTML="$(html_path $SRC $HTML_DIR)"
# Highlight Syntax using Agda
set -o pipefail \
@ -69,13 +78,26 @@ fi
sed -i -f "$AGDA_STDLIB_SED" "$HTML"
# Fix links to local modules
# TODO:
# 1) gather source files from the dirname and --include-path arguments, e.g., src/ and courses/tspl/2018/
# 2) compute module filenames for these files and compute their HTML output names (as above)
# 3) compute output filenames for these files (requires output names to be computable)
# 4) create sed script which matches and replaces the HTML filenames with Jekyll links to the output filenames
# 5) run sed script
# Create a sed script which matches and repairs all local links
for INCLUDE_PATH in "$@"; do
if [[ "$INCLUDE_PATH" = --include-path=* ]]; then
INCLUDE_PATH="${INCLUDE_PATH:15}"
INCLUDE_PATH="${INCLUDE_PATH%/}"
INCLUDE_PATH="${INCLUDE_PATH#./}"
LOCAL_LINKS_SED=`echo ".links-${INCLUDE_PATH}.sed" | sed -e "s|/|-|g;"`
if [ ! -f "$LOCAL_LINKS_SED" ]; then
find "$INCLUDE_PATH" -name "*.lagda.md" -print0 | while read -d $'\0' AGDA_MODULE_SRC; do
AGDA_MODULE_SRC="${AGDA_MODULE_SRC#./}"
AGDA_MODULE_OUT="$(out_path "$AGDA_MODULE_SRC")"
AGDA_MODULE_HTML="$(basename "$(html_path "$AGDA_MODULE_SRC" "$HTML_DIR")" .md).html"
echo "s|$AGDA_MODULE_HTML|{% endraw %}{{ site.baseurl }}{% link $AGDA_MODULE_OUT %}{% raw %}|;" >> "$LOCAL_LINKS_SED"
done
fi
sed -i -f "$LOCAL_LINKS_SED" "$HTML"
fi
done
# Copy over the temporary file to the output path
mkdir -p "$OUT_DIR"

View file

@ -374,3 +374,40 @@
Title = {A filter lambda model and the completeness of type assignment},
Volume = {48},
Year = {1983}}
@inproceedings{Kokke-2015,
abstract = {As proofs in type theory become increasingly
complex, there is a growing need to provide better
proof automation. This paper shows how to implement
a Prolog-style resolution procedure in the
dependently typed programming language
Agda. Connecting this resolution procedure to Agda's
reflection mechanism provides a first-class proof
search tactic for first-order Agda terms. As a
result, writing proof automation tactics need not be
different from writing any other program. },
year = {2015},
month = jun,
isbn = {978-3-319-19796-8},
booktitle = {Mathematics of Program Construction},
volume = {9129},
series = {Lecture Notes in Computer Science},
editor = {Hinze, Ralf and Voigtländer, Janis},
doi = {10.1007/978-3-319-19797-5_14},
title = {Auto in Agda: Programming Proof Search Using Reflection},
url = {https://wenkokke.github.io/pubs/mpc2015.pdf},
publisher = {Springer International Publishing},
author = {Wen Kokke and Wouter Swierstra},
pages = {276-301}}
@thesis{Kidney-2019,
address = {Cork, Ireland},
type = {Bachelor thesis},
title = {Automatically and {Efficiently} {Illustrating} {Polynomial} {Equalities} in {Agda}},
url = {https://doisinkidney.com/pdfs/bsc-thesis.pdf},
abstract = {We present a new library which automates the construction of equivalence proofs between polynomials over commutative rings and semirings in the programming language Agda [20]. It is significantly faster than Agda's existing solver. We use reflection to provide a simple interface to the solver, and demonstrate how to use the constructed proofs to provide step-by-step solutions.},
language = {en},
school = {University College Cork},
author = {Kidney, Donnacha Oisín},
month = apr,
year = {2019}}

View file

@ -371,28 +371,27 @@ PLFA and SF differ in several particulars. PLFA begins with a computationally
complete language, PCF, while SF begins with a minimal language, simply-typed
lambda calculus with booleans. PLFA does not include type annotations in terms,
and uses bidirectional type inference, while SF has terms with unique types and
uses type checking. SF also covers a simple imperative language with Hoare logic,
and for lambda calculus covers subtyping, record types, mutable references, and normalisation
---none of which are treated by PLFA. PLFA covers an inherently-typed de Bruijn
representation, bidirectional type inference, bisimulation, and an
untyped call-by-name language with full normalisation---none of which
are treated by SF.
uses type checking. SF also covers a simple imperative language with Hoare
logic, and for lambda calculus covers subtyping, record types, mutable
references, and normalisation---none of which are treated by PLFA. PLFA covers
an inherently-typed de Bruijn representation, bidirectional type inference,
bisimulation, and an untyped call-by-name language with full
normalisation---none of which are treated by SF.
SF has a third volume, written by Andrew Appel, on Verified Functional
Algorithms. I'm not sufficiently familiar with that volume to have a view
on whether it would be easy or hard to cover that material in Agda.
And SF recently added a fourth volume on random testing of Coq specifications
using QuickChick. There is currently no tool equivalent to QuickChick
available for Agda.
Algorithms. I'm not sufficiently familiar with that volume to have a view on
whether it would be easy or hard to cover that material in Agda. And SF recently
added a fourth volume on random testing of Coq specifications using QuickChick.
There is currently no tool equivalent to QuickChick available for Agda.
There is more material that would be desirable to include in PLFA
which was not due to limits of time. In future years, PLFA may be
extended to cover additional material, including mutable references,
normalisation, System F, pure type systems, and denotational
semantics. I'd especially like to include pure type systems as they
provide the readers with a formal model close to the dependent types
used in the book. My attempts so far to formalise pure type systems
have proved challenging, to say the least.
There is more material that would be desirable to include in PLFA which was not
due to limits of time. In future years, PLFA may be extended to cover
additional material, including mutable references, normalisation, System F, and
pure type systems. We'd especially like to include pure type systems as they
provide the readers with a formal model close to the dependent types used in the
book. Our attempts so far to formalise pure type systems have proved
challenging, to say the least.
\section{Proofs in Agda and Coq}
@ -421,23 +420,16 @@ have proved challenging, to say the least.
\label{fig:sf-progress-2}
\end{figure}
% \begin{figure}[t]
% \includegraphics[width=\textwidth]{figures/sf-progress-3.png}
% \caption{SF, Progress (3/3)}
% \label{fig:sf-progress-3}
% \end{figure}
The introduction listed several reasons for preferring Agda over Coq.
But Coq tactics enable more compact proofs. Would it be possible for
PLFA to cover the same material as SF, or would the proofs
balloon to unmanageable size?
As an experiment, I first rewrote SF's development of simply-typed
lambda calculus (SF, Chapters Stlc and StlcProp) in Agda. I was a
newbie to Agda, and translating the entire development, sticking as
closely as possible to the development in SF, took me about two days.
I was pleased to discover that the proofs remained about the same
size.
As an experiment, Philip first rewrote SF's development of simply-typed lambda
calculus (SF, Chapters Stlc and StlcProp) in Agda. He was a newbie to Agda, and
translating the entire development, sticking as closely as possible to the
development in SF, took about two days. We were pleased to discover that the
proofs remained about the same size.
There was also a pleasing surprise regarding the structure of the
proofs. While most proofs in both SF and PLFA are carried out by
@ -446,7 +438,7 @@ central proof, that substitution preserves types, is carried out by
induction on terms for a technical reason
(the context is extended by a variable binding, and hence not
sufficiently ``generic'' to work well with Coq's induction tactic). In
Agda, I had no trouble formulating the same proof over evidence that
Agda, we had no trouble formulating the same proof over evidence that
the term is well typed, and didn't even notice SF's description of the
issue until we were done.
@ -456,26 +448,35 @@ deterministic. There are 18 cases, one case per line. Ten of the
cases deal with the situation where there are potentially two
different reductions; each case is trivially shown to be
impossible. Five of the ten cases are redundant, as they just involve
switching the order of the arguments. I had to copy the cases
switching the order of the arguments. We had to copy the cases
suitably permuted. It would be preferable to reinvoke the proof on
switched arguments, but this would not pass Agda's termination checker
since swapping the arguments doesn't yield a recursive call on
structurally smaller arguments. I suspect tactics could cut down the
proof significantly. I tried to compare with SF's proof that reduction
structurally smaller arguments. We suspect tactics could cut down the
proof significantly. We tried to compare with SF's proof that reduction
is deterministic, but failed to find that proof.
SF covers an imperative language with Hoare logic, culminating in
code that takes an imperative programme suitably decorated with preconditions
and postconditions and generates the necessary
verification conditions. The conditions are then verified by a custom
tactic, where any questions of arithmetic are resolved by the
``omega'' tactic invoking a decision procedure. The entire
exercise would be easy to repeat in Agda, save for the last step: I
suspect Agda's automation would not be up to verifying the generated
conditions, requiring tedious proofs by hand. However, I had already
decided to omit Hoare logic in order to focus on lambda calculus.
SF covers an imperative language with Hoare logic, culminating in code that
takes an imperative programme suitably decorated with preconditions and
postconditions and generates the necessary verification conditions. The
conditions are then verified by a custom tactic, where any questions of
arithmetic are resolved by the ``omega'' tactic invoking a decision procedure.
The entire exercise would be easy to repeat in Agda, save for the last step, as
Agda does not offer support for proof automation out of the box. It is
certainly possible to implement proof automation in Agda---see, e.g., the auto
tactic by \citet{Kokke-2015}, and the collection of tactics in Ulf Norell's
\texttt{agda-prelude}\footnote{\url{https://github.com/UlfNorell/agda-prelude}}.
The standard library comes equipped with solvers for equations on monoids and
rings\footnote{\url{https://agda.github.io/agda-stdlib/Algebra.Solver.Ring.html}},
and a much improved solver for equalities on rings was recently contributed by
\citet{Kidney-2019}.
We suspect that, while Agda's automation would be up to verifying the generated
conditions, some effort would be required to implement the required custom
tactic, and a section would need to be added to the book to cover proof
automation. For the time being, we have decided to omit Hoare logic in order to
focus on lambda calculus.
To give a flavour of how the texts compare, I show the
To give a flavour of how the texts compare, we show the
proof of progress for simply-typed lambda calculus from both texts.
Figures~\ref{fig:plfa-progress-1} and \ref{fig:plfa-progress-2}
are taken from PLFA, Chapter Properties,
@ -495,20 +496,19 @@ it easy to use a line of dashes to separate hypotheses from conclusion
in inference rules. The proof of proposition \texttt{progress} (the different
case making it a distinct name) is laid out carefully. The neat
indented structure emphasises the case analysis, and all right-hand
sides line-up in the same column. My hope as an author is that students
sides line-up in the same column. Our hope as authors is that students
will read the formal proof first, and use it as a tabular guide
to the informal explanation that follows.
SF puts the informal explanation first, followed by the formal proof.
The text hides the formal proof script under an icon;
the figure shows what appears when the icon is expanded.
As a teacher I was aware that
students might skip it on a first reading, and I would have to hope the
students would return to it and step through it with an interactive
tool in order to make it intelligible. I expect the students skipped over
many such proofs. This particular proof forms the basis for a question
of the mock exam and the past exams, so I expect most students will actually
look at this one if not all the others.
SF puts the informal explanation first, followed by the formal proof. The text
hides the formal proof script under an icon; the figure shows what appears when
the icon is expanded. As teachers, we were aware that students might skip the
formal proof on a first reading, and we would have to hope the students would
return to it and step through it with an interactive tool in order to make it
intelligible. We expect the students skipped over many such proofs. This
particular proof forms the basis for a question of the mock exam and the past
exams, so we expect most students will actually look at this one if not all the
others.
\newcommand{\ex}{\texttt{x}}
\newcommand{\why}{\texttt{y}}
@ -569,7 +569,7 @@ recorded. Students may recreate it by commenting out bits of code and
introducing a hole in their place. PLFA contains some prose descriptions
of interactively building code, but mainly contains code that students
can read. They may also introduce holes to interact with the code, but
I expect this will be rarer than with SF.
we expect this will be rarer than with SF.
SF encourages students to interact with all the scripts in the text.
Trying to understand a Coq proof script without running it
@ -581,9 +581,9 @@ Understanding the code often requires working out the types, but
head; when it is not easy, students still have the option of
interaction.
While students are keen to interact to create code, I have found they
While students are keen to interact to create code, we have found they
are reluctant to interact to understand code created by others. For
this reason, I suspect this may make Agda a more suitable vehicle for
this reason, we suspect this may make Agda a more suitable vehicle for
teaching. Nate Foster suggests this hypothesis is ripe to be tested
empirically, perhaps using techniques similar to those of
\citet{Danas-et-al-2017}.
@ -635,7 +635,8 @@ Redex \citep{Felleisen-et-al-2009} and K \citep{Rosu-Serbanuta-2010},
advertise as one of their advantages that they can generate
a prototype from descriptions of the reduction rules.
I was therefore surprised to realise that any constructive proof of
% TODO: rewrite use of 'Philip' to reflect all authors?
Philip was therefore surprised to realise that any constructive proof of
progress and preservation \emph{automatically} gives rise to such a
prototype. The input is a term together with evidence the term is
well-typed. (In the inherently-typed case, these are the same thing.)
@ -659,7 +660,7 @@ improve readability; in addition, the text includes examples of
running the evaluator with its unedited output.
It is immediately obvious that progress and preservation make it
trivial to construct a prototype evaluator, and yet I cannot find such
trivial to construct a prototype evaluator, and yet we cannot find such
an observation in the literature nor mentioned in an introductory
text. It does not appear in SF, nor in \citet{Harper-2016}. A plea
to the Agda mailing list failed to turn up any prior mentions.
@ -667,7 +668,7 @@ The closest related observation I have seen in the published
literature is that evaluators can be extracted from proofs of
normalisation \citep{Berger-1993,Dagand-and-Scherer-2015}.
(Late addition: My plea to the Agda list eventually bore fruit. Oleg
(Late addition: Our plea to the Agda list eventually bore fruit. Oleg
Kiselyov directed us to unpublished remarks on his web page where he
uses the name \texttt{eval} for a proof of progress and notes ``the
very proof of type soundness can be used to evaluate sample
@ -688,12 +689,13 @@ expressions'' \citep{Kiselyov-2009}.)
\label{fig:inherent}
\end{figure}
% TODO: consider rewriting this?
The second part of PLFA first discusses two different approaches to
modeling simply-typed lambda calculus. It first presents raw
terms with named variables and a separate typing relation and
then shifts to inherently-typed terms with de Bruijn indices.
Before writing the text, I had thought the two approaches
complementary, with no clear winner. Now I am convinced that the
Before writing the text, Philip had thought the two approaches
complementary, with no clear winner. Now he is convinced that the
inherently-typed approach is superior.
Figure~\ref{fig:raw} presents the raw approach.
@ -714,29 +716,25 @@ the types of variables and terms are indexed by a context and a type,
so that $\GG\:{\ni}\:\AY$ and $\GG\:{\vdash}\:\AY$ denote
variables and terms, respectively, that under context $\GG$ have type $\AY$.
The indexed types closely resemble the previous judgments:
we now represent a variable or a term by the proof that it is well-typed.
In particular, the proof that a variable is well-typed in the raw approach
we now represent a variable or a term by the proof that it is well typed.
In particular, the proof that a variable is well typed in the raw approach
corresponds to a de Bruijn index in the inherent approach.
The raw approach requires more lines of code than
the inherent approach. The separate definition of raw
terms is not needed in the inherent
approach; and one judgment in the raw approach needs to check that
$\ex \not\equiv \why$, while the corresponding judgment in the
inherent approach does not. The difference becomes more pronounced
when including the code for substitution, reductions, and proofs of
progress and preservation. In particular, where the raw approach
requires one first define substitution and reduction and then prove
they preserve types, the inherent approach establishes substitution at
the same time it defines substitution and reduction.
The raw approach requires more lines of code than the inherent approach. The
separate definition of raw terms is not needed in the inherent approach; and one
judgment in the raw approach needs to check that $\ex \not\equiv \why$, while
the corresponding judgment in the inherent approach does not. The difference
becomes more pronounced when including the code for substitution, reductions,
and proofs of progress and preservation. In particular, where the raw approach
requires one first define substitution and reduction and then prove they
preserve types, the inherent approach establishes substitution at the same time
it defines substitution and reduction.
Stripping
out examples and any proofs that appear in one but not the other (but
could have appeared in both), the full development in PLFA for the raw approach takes 451
lines (216 lines of definitions and 235 lines for the proofs) and the
development for the inherent approach takes 275 lines (with
definitions and proofs interleaved). We have 451 / 235 = 1.64,
close to the golden ratio.
Stripping out examples and any proofs that appear in one but not the other (but
could have appeared in both), the full development in PLFA for the raw approach
takes 451 lines (216 lines of definitions and 235 lines for the proofs) and the
development for the inherent approach takes 275 lines (with definitions and
proofs interleaved). We have 451 / 275 = 1.64, close to the golden ratio.
The inherent approach also has more expressive power. The raw
approach is restricted to substitution of one variable by a closed
@ -744,11 +742,12 @@ term, while the inherent approach supports simultaneous substitution
of all variables by open terms, using a pleasing formulation due to
\citet{McBride-2005}, inspired by \citet{Goguen-and-McKinna-1997} and
\citet{Altenkirch-and-Reus-1999} and described in
\citet{Allais-et-al-2017}. In fact, I did manage to write a variant of
\citet{Allais-et-al-2017}. In fact, we did manage to write a variant of
the raw approach with simultaneous open substitution along the lines
of McBride, but the result was too complex for use in an introductory
text, requiring 695 lines of code---more than the total for the other
two approaches combined.
% TODO: is this still available somewhere in extra/?
The text develops both approaches because the raw approach is more
familiar, and because placing the inherent approach first would
@ -768,26 +767,24 @@ The benefits of the inherent approach
are well known to some. The technique was introduced by
\citet{Altenkirch-and-Reus-1999}, and widely used elsewhere,
notably by \citet{Chapman-2009} and \citet{Allais-et-al-2017}.
I'm grateful to David Darais for bringing it to my attention.
Philip is grateful to David Darais for bringing it to his attention.
\section{Conclusion}
I look forward to experience teaching from the new text,
We look forward to the experience of teaching from the new text,
and encourage others to use it too. Please comment!
\paragraph{Acknowledgments}
A special thank you to my coauthor, Wen Kokke. For inventing ideas on
which PLFA is based, and for hand-holding, many thanks to Conor
McBride, James McKinna, Ulf Norell, and Andreas Abel. For showing me
how much more compact it is to avoid raw terms, thanks to David
Darais. For inspiring my work by writing SF, thanks to Benjamin
Pierce and his coauthors. For comments on a draft of this paper, an
extra thank you to James McKinna, Ulf Norell, Andreas Abel, and
Benjamin Pierce. This research was supported by EPSRC Programme Grant
EP/K034413/1.
For inventing ideas on which PLFA is based, and for hand-holding, many thanks to
Conor McBride, James McKinna, Ulf Norell, and Andreas Abel. For showing me how
much more compact it is to avoid raw terms, thanks to David Darais. For
inspiring my work by writing SF, thanks to Benjamin Pierce and his coauthors.
For comments on a draft of this paper, an extra thank you to James McKinna, Ulf
Norell, Andreas Abel, and Benjamin Pierce. This research was supported by EPSRC
Programme Grant EP/K034413/1.
\section*{References}

View file

@ -57,7 +57,6 @@ open import Data.String using (String; _≟_)
open import Data.Nat using (ℕ; zero; suc)
open import Data.Empty using (⊥; ⊥-elim)
open import Relation.Nullary using (Dec; yes; no; ¬_)
open import Relation.Nullary.Negation using (¬?)
open import Data.List using (List; _∷_; [])
```
@ -1065,7 +1064,7 @@ The constructors `Z` and `S` correspond roughly to the constructors
`here` and `there` for the element-of relation `_∈_` on lists.
Constructor `S` takes an additional parameter, which ensures that
when we look up a variable that it is not _shadowed_ by another
variable with the same name earlier in the list.
variable with the same name to its left in the list.
### Typing judgment
@ -1081,7 +1080,7 @@ For example:
* `` ∅ , "s" ⦂ `` , "z" ⦂ `` "s" ⦂ `` ``
* `` ∅ , "s" ⦂ `` , "z" ⦂ `` "s" · ` "z" ⦂ ` ``
* `` ∅ , "s" ⦂ `` , "z" ⦂ `` "s" · (` "s" · ` "z") ⦂ ` ``
* `` ∅ , "s" ⦂ ``(ƛ "z" ⇒ ` "s" · (` "s" · ` "z"))` ⇒ ` ``
* `` ∅ , "s" ⦂ ``ƛ "z" ⇒ ` "s" · (` "s" · ` "z") ⦂ ` ⇒ ` ``
* `` ∅ ⊢ ƛ "s" ⇒ ƛ "z" ⇒ ` "s" · (` "s" · ` "z") ⦂ (` ⇒ `) ⇒ ` ⇒ ` ``
Typing is formalised as follows:

View file

@ -61,7 +61,7 @@ that `M —→ N`.
So, either we have a value, and we are done, or we can take a reduction step.
In the latter case, we would like to apply progress again. But to do so we need
to know that the term yielded by the reduction is itself closed and well-typed.
to know that the term yielded by the reduction is itself closed and well typed.
It turns out that this property holds whenever we start with a closed,
well-typed term.
@ -70,7 +70,7 @@ _Preservation_: If `∅ ⊢ M ⦂ A` and `M —→ N` then `∅ ⊢ N ⦂ A`.
This gives us a recipe for automating evaluation. Start with a closed
and well-typed term. By progress, it is either a value, in which case
we are done, or it reduces to some other term. By preservation, that
other term will itself be closed and well-typed. Repeat. We will
other term will itself be closed and well typed. Repeat. We will
either loop forever, in which case evaluation does not terminate, or
we will eventually reach a value, which is guaranteed to be closed and
of the same type as the original term. We will turn this recipe into
@ -78,8 +78,8 @@ Agda code that can compute for us the reduction sequence of `plus ·
two · two`, and its Church numeral variant.
(The development in this chapter was inspired by the corresponding
development in _Software Foundations, volume _Programming Language
Foundations_, chapter _StlcProp_. It will turn out that one of our technical choices
development in _Software Foundations_, Volume _Programming Language
Foundations_, Chapter _StlcProp_. It will turn out that one of our technical choices
— to introduce an explicit judgment `Γ ∋ x ⦂ A` in place of
treating a context as a function from identifiers to types —
permits a simpler development. In particular, we can prove substitution preserves
@ -131,7 +131,7 @@ Well-typed values must take one of a small number of _canonical forms_.
We provide an analogue of the `Value` relation that relates values
to their types. A lambda expression must have a function type,
and a zero or successor expression must be a natural.
Further, the body of a function must be well-typed in a context
Further, the body of a function must be well typed in a context
containing only its bound variable, and the argument of successor
must itself be canonical:
```
@ -176,8 +176,8 @@ There are only three interesting cases to consider:
* If the term is zero then it is canonical trivially.
* If the term is a successor then since it is well-typed its argument
is well-typed, and since it is a value its argument is a value.
* If the term is a successor then since it is well typed its argument
is well typed, and since it is a value its argument is a value.
Hence, by induction its argument is also canonical.
The variable case is thrown out because a closed term has no free
@ -186,7 +186,7 @@ application, case expression, and fixpoint are thrown out because they
are not values.
Conversely, if a term is canonical then it is a value
and it is well-typed in the empty context:
and it is well typed in the empty context:
```
value : ∀ {M A}
→ Canonical M ⦂ A
@ -221,8 +221,8 @@ then the term
s · `zero
cannot reduce because we do not know which function is bound to the
free variable `s`. The first of those terms is ill-typed, and the
second has a free variable. Every term that is well-typed and closed
free variable `s`. The first of those terms is ill typed, and the
second has a free variable. Every term that is well typed and closed
has the desired property.
_Progress_: If `∅ ⊢ M ⦂ A` then either `M` is a value or there is an `N` such
@ -247,7 +247,7 @@ A term `M` makes progress if either it can take a step, meaning there
exists a term `N` such that `M —→ N`, or if it is done, meaning that
`M` is a value.
If a term is well-typed in the empty context then it satisfies progress:
If a term is well typed in the empty context then it satisfies progress:
```
progress : ∀ {M A}
→ ∅ ⊢ M ⦂ A
@ -272,7 +272,7 @@ progress (⊢case ⊢L ⊢M ⊢N) with progress ⊢L
... | C-suc CL = step (β-suc (value CL))
progress (⊢μ ⊢M) = step β-μ
```
We induct on the evidence that the term is well-typed.
We induct on the evidence that the term is well typed.
Let's unpack the first three cases:
* The term cannot be a variable, since no variable is well typed
@ -281,7 +281,7 @@ Let's unpack the first three cases:
* If the term is a lambda abstraction then it is a value.
* If the term is an application `L · M`, recursively apply
progress to the derivation that `L` is well-typed:
progress to the derivation that `L` is well typed:
+ If the term steps, we have evidence that `L —→ L`,
which by `ξ-·₁` means that our original term steps
@ -289,7 +289,7 @@ Let's unpack the first three cases:
+ If the term is done, we have evidence that `L` is
a value. Recursively apply progress to the derivation
that `M` is well-typed:
that `M` is well typed:
- If the term steps, we have evidence that `M —→ M`,
which by `ξ-·₂` means that our original term steps
@ -377,12 +377,12 @@ In symbols:
---------------------------------
∀ {M A} → Γ ⊢ M ⦂ A → Δ ⊢ M ⦂ A
Three important corollaries follow. The _weaken_ lemma asserts a term
well-typed in the empty context is also well-typed in an arbitrary
context. The _drop_ lemma asserts a term well-typed in a context where the
same variable appears twice remains well-typed if we drop the shadowed
occurrence. The _swap_ lemma asserts a term well-typed in a context
remains well-typed if we swap two variables.
Three important corollaries follow. The _weaken_ lemma asserts that a term
which is well typed in the empty context is also well typed in an arbitrary
context. The _drop_ lemma asserts that a term which is well typed in a context
where the same variable appears twice remains well typed if we drop the shadowed
occurrence. The _swap_ lemma asserts that a term which is well typed in a
context remains well typed if we swap two variables.
(Renaming is similar to the _context invariance_ lemma in _Software
Foundations_, but it does not require the definition of
@ -488,7 +488,7 @@ rename ρ (⊢μ ⊢M) = ⊢μ (rename (ext ρ) ⊢M)
```
As before, let `ρ` be the name of the map that takes evidence that
`x` appears in `Γ` to evidence that `x` appears in `Δ`. We induct
on the evidence that `M` is well-typed in `Γ`. Let's unpack the
on the evidence that `M` is well typed in `Γ`. Let's unpack the
first three cases:
* If the term is a variable, then applying `ρ` to the evidence
@ -505,7 +505,7 @@ function and the argument.
The remaining cases are similar, using induction for each subterm, and
extending the map whenever the construct introduces a bound variable.
The induction is over the derivation that the term is well-typed,
The induction is over the derivation that the term is well typed,
so extending the context doesn't invalidate the inductive hypothesis.
Equivalently, the recursion terminates because the second argument
always grows smaller, even though the first argument sometimes grows larger.
@ -623,7 +623,7 @@ subst {x = y} ⊢V (⊢μ {x = x} ⊢M) with x ≟ y
... | yes refl = ⊢μ (drop ⊢M)
... | no x≢y = ⊢μ (subst ⊢V (swap x≢y ⊢M))
```
We induct on the evidence that `N` is well-typed in the
We induct on the evidence that `N` is well typed in the
context `Γ` extended by `x`.
First, we note a wee issue with naming. In the lemma
@ -940,7 +940,7 @@ data Steps (L : Term) : Set where
----------
→ Steps L
```
The evaluator takes gas and evidence that a term is well-typed,
The evaluator takes gas and evidence that a term is well typed,
and returns the corresponding steps:
```
eval : ∀ {L A}
@ -955,15 +955,15 @@ eval {L} (gas (suc m)) ⊢L with progress ⊢L
... | steps M—↠N fin = steps (L —→⟨ L—→M ⟩ M—↠N) fin
```
Let `L` be the name of the term we are reducing, and `⊢L` be the
evidence that `L` is well-typed. We consider the amount of gas
evidence that `L` is well typed. We consider the amount of gas
remaining. There are two possibilities:
* It is zero, so we stop early. We return the trivial reduction
sequence `L —↠ L`, evidence that `L` is well-typed, and an
sequence `L —↠ L`, evidence that `L` is well typed, and an
indication that we are out of gas.
* It is non-zero and after the next step we have `m` gas remaining.
Apply progress to the evidence that term `L` is well-typed. There
Apply progress to the evidence that term `L` is well typed. There
are two possibilities:
+ Term `L` is a value, so we are done. We return the
@ -971,9 +971,9 @@ remaining. There are two possibilities:
well-typed, and the evidence that `L` is a value.
+ Term `L` steps to another term `M`. Preservation provides
evidence that `M` is also well-typed, and we recursively invoke
evidence that `M` is also well typed, and we recursively invoke
`eval` on the remaining gas. The result is evidence that
`M —↠ N`, together with evidence that `N` is well-typed and an
`M —↠ N`, together with evidence that `N` is well typed and an
indication of whether reduction finished. We combine the evidence
that `L —→ M` and `M —↠ N` to return evidence that `L —↠ N`,
together with the other relevant evidence.
@ -983,7 +983,7 @@ remaining. There are two possibilities:
We can now use Agda to compute the non-terminating reduction
sequence given earlier. First, we show that the term `sucμ`
is well-typed:
is well typed:
```
⊢sucμ : ∅ ⊢ μ "x" ⇒ `suc ` "x" ⦂ `ℕ
⊢sucμ = ⊢μ (⊢suc (⊢` ∋x))
@ -1327,7 +1327,7 @@ postulate
```
Using preservation, it is easy to show that after any number of steps,
a well-typed term remains well-typed:
a well-typed term remains well typed:
```
postulate
preserves : ∀ {M N A}