feat(frontends/lean,library/unifier): checkpoints at have-expressions

@avigad, @fpvandoorn, @rlewis1988, @dselsam

This commit modifies how have-expressions are elaborated.
Now, to process

     have H : <type>, from <proof>,
     <rest>

we first process the constraints in <type> and <proof> simultaneously.
After all these constraints have been solved, the elaborator performs
a Prolog-like cut and then processes the constraints in <rest>.
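
Concretely, the parser change below wraps <rest> in a checkpoint annotation:
a have-expression is parsed as an annotated lambda applied to the proof, and
the elaborator later turns the checkpoint into a delayed choice constraint
(the new Checkpoint group) whose processing triggers the cut. Roughly (the
annotation names below are internal markers, not surface syntax):

     have H : <type>, from <proof>,
     <rest>

     -- is parsed as the application
     have_annotation (λ H : <type>, checkpoint <rest>) <proof>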

So, all overloads, type classes and coercions in <type> and <proof> are resolved
before we start processing <rest>. Moreover, while processing <rest>, we can no
longer backtrack into <type> and <proof>.

I fixed all affected proofs in the standard and HoTT libraries in
previous commits pushed today and yesterday. I think most of the affected
proofs were either not written in a good style or were easy to fix.
Here is a common pattern that does not work anymore.

   structure has_scalar [class] (F V : Type) :=
   (smul : F → V → V)

   infixl ` • `:73 := has_scalar.smul

   proposition smul_zero (a : R) : a • (0 : M) = 0 :=
   have a • 0 + a • 0 = a • 0 + 0, by rewrite [-smul_left_distrib, *add_zero],
   !add.left_cancel this

The `have` no longer works because Lean cannot figure out the type of `0` before
it starts processing `!add.left_cancel this`. This is easy to fix: we just have to
annotate one of the `0`s in the `have`:

   proposition smul_zero (a : R) : a • (0 : M) = 0 :=
   have a • (0:M) + a • 0 = a • 0 + 0, by rewrite [-smul_left_distrib, *add_zero],
   !add.left_cancel this

BTW, all tactics are still executed only after all constraints have been solved.
We may change that in the future. I didn't want to execute the tactics in
<proof> before processing <rest> because of universe meta-variables: in Lean,
unassigned universe meta-variables become parameters, and we perform this
conversion *before* we start executing tactics. The reason is that universe
meta-variables create many problems for tactics such as `rewrite`, `blast`
and `simp`.
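
As a hedged illustration (not from the commit, and the printed output is
approximate): in Lean 2, a definition whose universe is never constrained
ends up with a fresh universe parameter such as `l_1`:

   set_option pp.universes true

   definition tst (A : Type) (a : A) : A := a
   check @tst   -- tst.{l_1} : Π (A : Type.{l_1}), A → A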

Finally, we can recover the previous behavior using the option

         set_option parser.checkpoint_have false
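
For instance (a hedged sketch), setting the option before the earlier
proposition should make the original, unannotated `have` elaborate as it
did before this commit:

   set_option parser.checkpoint_have false

   proposition smul_zero (a : R) : a • (0 : M) = 0 :=
   have a • 0 + a • 0 = a • 0 + 0, by rewrite [-smul_left_distrib, *add_zero],
   !add.left_cancel this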

commit 04eaf184a9 (parent 30d6853ffd)
Author: Leonardo de Moura
Date:   2016-02-04 18:41:55 -08:00
7 changed files with 141 additions and 24 deletions


@@ -4,6 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
*/
#include "util/sexpr/option_declarations.h"
#include "util/sstream.h"
#include "kernel/abstract.h"
#include "library/annotation.h"
@@ -36,7 +37,17 @@ Author: Leonardo de Moura
#include "frontends/lean/obtain_expr.h"
#include "frontends/lean/nested_declaration.h"
#ifndef LEAN_DEFAULT_PARSER_CHECKPOINT_HAVE
#define LEAN_DEFAULT_PARSER_CHECKPOINT_HAVE true
#endif
namespace lean {
static name * g_parser_checkpoint_have = nullptr;
bool get_parser_checkpoint_have(options const & opts) {
return opts.get_bool(*g_parser_checkpoint_have, LEAN_DEFAULT_PARSER_CHECKPOINT_HAVE);
}
namespace notation {
static expr parse_Type(parser & p, unsigned, expr const *, pos_info const & pos) {
if (p.curr_is_token(get_llevel_curly_tk())) {
@@ -475,6 +486,8 @@ static expr parse_have_core(parser & p, pos_info const & pos, optional<expr> con
}
// remark: mk_contextual_info(false) informs the elaborator that prop should not occur inside metavariables.
body = abstract(body, l);
if (get_parser_checkpoint_have(p.get_options()))
body = mk_checkpoint_annotation(body);
expr r = p.save_pos(mk_have_annotation(p.save_pos(mk_lambda(id, prop, body, bi), pos)), pos);
return p.mk_app(r, proof, pos);
}
@@ -814,6 +827,10 @@ void initialize_builtin_exprs() {
*g_nud_table = notation::init_nud_table();
g_led_table = new parse_table();
*g_led_table = notation::init_led_table();
g_parser_checkpoint_have = new name{"parser", "checkpoint_have"};
register_bool_option(*g_parser_checkpoint_have, LEAN_DEFAULT_PARSER_CHECKPOINT_HAVE,
"(parser) introduces a checkpoint on have-expressions, checkpoints are like Prolog-cuts");
}
void finalize_builtin_exprs() {
@@ -821,5 +838,6 @@
delete g_nud_table;
delete notation::H_obtain_from;
delete notation::g_not;
delete g_parser_checkpoint_have;
}
}


@@ -40,6 +40,7 @@ Author: Leonardo de Moura
#include "library/util.h"
#include "library/choice_iterator.h"
#include "library/projection.h"
#include "library/trace.h"
#include "library/pp_options.h"
#include "library/class_instance_resolution.h"
#include "library/tactic/expr_to_tactic.h"
@@ -1239,8 +1240,9 @@ constraint elaborator::mk_equations_cnstr(expr const & m, expr const & eqns) {
expr new_eqns = new_s.instantiate_all(eqns);
bool reject_type_is_meta = false;
new_eqns = solve_unassigned_mvars(new_s, new_eqns, reject_type_is_meta);
if (display_unassigned_mvars(new_eqns, new_s))
if (display_unassigned_mvars(new_eqns, new_s)) {
return lazy_list<constraints>();
}
type_checker_ptr tc = mk_type_checker(_env, std::move(ngen));
new_eqns = assign_equation_lhs_metas(*tc, new_eqns);
expr val = compile_equations(*tc, _ios, new_eqns, meta, meta_type);
@@ -1331,7 +1333,8 @@ expr elaborator::visit_equations(expr const & eqns, constraint_seq & cs) {
expr m = m_full_context.mk_meta(m_ngen, some_expr(type), eqns.get_tag());
register_meta(m);
constraint c = mk_equations_cnstr(m, new_eqns);
cs += c;
/* We use stack policy for processing MaxDelayed constraints */
cs = c + cs;
return m;
}
@@ -1695,6 +1698,33 @@ expr elaborator::visit_prenum(expr const & e, constraint_seq & cs) {
}
}
expr elaborator::visit_checkpoint_expr(expr const & e, constraint_seq & cs) {
expr arg = get_annotation_arg(e);
expr m;
if (is_by(arg))
m = m_context.mk_meta(m_ngen, none_expr(), e.get_tag());
else
m = m_full_context.mk_meta(m_ngen, none_expr(), e.get_tag());
register_meta(m);
local_context ctx = m_context;
local_context full_ctx = m_full_context;
bool in_equation_lhs = m_in_equation_lhs;
auto fn = [=](expr const & meta, expr const & /* type */, substitution const & /* s */,
name_generator && /* ngen */) {
flet<local_context> set1(m_context, ctx);
flet<local_context> set2(m_full_context, full_ctx);
flet<bool> set3(m_in_equation_lhs, in_equation_lhs);
pair<expr, constraint_seq> rcs = visit(arg);
expr r = rcs.first;
constraint_seq cs = rcs.second;
cs = mk_eq_cnstr(meta, r, justification()) + cs;
return lazy_list<constraints>(cs.to_list());
};
justification j;
cs += mk_choice_cnstr(m, fn, to_delay_factor(cnstr_group::Checkpoint), true, j);
return m;
}
expr elaborator::visit_core(expr const & e, constraint_seq & cs) {
if (is_prenum(e)) {
return visit_prenum(e, cs);
@@ -1738,6 +1768,8 @@ expr elaborator::visit_core(expr const & e, constraint_seq & cs) {
return visit_structure_instance(e, cs);
} else if (is_obtain_expr(e)) {
return visit_obtain_expr(e, cs);
} else if (is_checkpoint_annotation(e)) {
return visit_checkpoint_expr(e, cs);
} else {
switch (e.kind()) {
case expr_kind::Local: return e;


@@ -190,6 +190,8 @@ class elaborator : public coercion_info_manager {
expr visit_prenum(expr const & e, constraint_seq & cs);
expr visit_checkpoint_expr(expr const & e, constraint_seq & cs);
void check_used_local_tactic_hints();
void show_goal(proof_state const & ps, expr const & start, expr const & end, expr const & curr);


@@ -133,23 +133,28 @@ expr copy_annotations(expr const & from, expr const & to) {
return r;
}
static name * g_have = nullptr;
static name * g_show = nullptr;
static name * g_have = nullptr;
static name * g_show = nullptr;
static name * g_checkpoint = nullptr;
expr mk_have_annotation(expr const & e) { return mk_annotation(*g_have, e); }
expr mk_show_annotation(expr const & e) { return mk_annotation(*g_show, e); }
expr mk_checkpoint_annotation(expr const & e) { return mk_annotation(*g_checkpoint, e); }
bool is_have_annotation(expr const & e) { return is_annotation(e, *g_have); }
bool is_show_annotation(expr const & e) { return is_annotation(e, *g_show); }
bool is_checkpoint_annotation(expr const & e) { return is_annotation(e, *g_checkpoint); }
void initialize_annotation() {
g_annotation = new name("annotation");
g_annotation_opcode = new std::string("Annot");
g_annotation_macros = new annotation_macros();
g_have = new name("have");
g_show = new name("show");
g_have = new name("have");
g_show = new name("show");
g_checkpoint = new name("checkpoint");
register_annotation(*g_have);
register_annotation(*g_show);
register_annotation(*g_checkpoint);
register_macro_deserializer(get_annotation_opcode(),
[](deserializer & d, unsigned num, expr const * args) {


@@ -57,10 +57,12 @@ expr copy_annotations(expr const & from, expr const & to);
expr mk_have_annotation(expr const & e);
/** \brief Tag \c e as a 'show'-expression. 'show' is a pre-registered annotation. */
expr mk_show_annotation(expr const & e);
expr mk_checkpoint_annotation(expr const & e);
/** \brief Return true iff \c e was created using #mk_have_annotation. */
bool is_have_annotation(expr const & e);
/** \brief Return true iff \c e was created using #mk_show_annotation. */
bool is_show_annotation(expr const & e);
bool is_checkpoint_annotation(expr const & e);
/** \brief Return the serialization 'opcode' for annotation macros. */
std::string const & get_annotation_opcode();


@@ -289,15 +289,30 @@ unify_status unify_simple(substitution & s, constraint const & c) {
static constraint * g_dont_care_cnstr = nullptr;
static unsigned g_group_size = 1u << 28;
constexpr unsigned g_num_groups = 8;
static unsigned g_cnstr_group_first_index[g_num_groups] = { 0, g_group_size, 2*g_group_size, 3*g_group_size, 4*g_group_size, 5*g_group_size, 6*g_group_size, 7*g_group_size};
constexpr unsigned g_num_groups = 9;
static unsigned g_cnstr_group_first_index[g_num_groups] = { 0, g_group_size, 2*g_group_size, 3*g_group_size, 4*g_group_size, 5*g_group_size, 6*g_group_size, 7*g_group_size, 8*g_group_size};
static unsigned get_group_first_index(cnstr_group g) {
return g_cnstr_group_first_index[static_cast<unsigned>(g)];
}
static cnstr_group to_cnstr_group(unsigned d) {
if (d >= g_num_groups)
d = g_num_groups-1;
return static_cast<cnstr_group>(d);
static unsigned get_group_last_index(cnstr_group g) {
unsigned idx = static_cast<unsigned>(g);
lean_assert(idx < g_num_groups);
if (idx == g_num_groups - 1) {
lean_assert(g == cnstr_group::MaxDelayed);
return std::numeric_limits<unsigned>::max() - g_group_size;
} else {
lean_assert(idx < g_num_groups - 1);
return g_cnstr_group_first_index[idx+1] - 1;
}
}
static cnstr_group to_cnstr_group(unsigned cidx) {
for (unsigned i = 1; i < g_num_groups; i++) {
if (cidx < g_cnstr_group_first_index[i])
return static_cast<cnstr_group>(i);
}
return cnstr_group::MaxDelayed;
}
/** \brief Convert choice constraint delay factor to cnstr_group */
@@ -329,7 +344,7 @@ struct unifier_fn {
name_generator m_ngen;
substitution m_subst;
constraints m_postponed; // constraints that will not be solved
owned_map m_owned_map; // mapping from metavariable name m to delay factor of the choice constraint that owns m
owned_map m_owned_map; // mapping from metavariable name m to constraint idx
expr_map m_type_map; // auxiliary map for storing the type of the expr in choice constraints
unifier_plugin m_plugin;
type_checker_ptr m_tc;
@@ -542,14 +557,46 @@ struct unifier_fn {
return added;
}
/** \brief Add constraint to the constraint queue */
/** \brief Add constraint to the constraint queue.
\remark We use FIFO policy for all but MaxDelayed group.
*/
unsigned add_cnstr(constraint const & c, cnstr_group g) {
unsigned cidx = m_next_cidx + get_group_first_index(g);
unsigned cidx;
if (g == cnstr_group::MaxDelayed) {
// MaxDelayed is a stack
cidx = get_group_last_index(g) - m_next_cidx;
/* Make sure there is at least one free space between any two MAX_DELAYED constraints.
We want to put constraints containing the variable owned by a MAX_DELAYED constraint c
after it and before the next MAX_DELAYED constraint c. */
m_next_cidx += 2;
} else {
cidx = m_next_cidx + get_group_first_index(g);
m_next_cidx++;
}
m_cnstrs.insert(cnstr(c, cidx));
m_next_cidx++;
return cidx;
}
/** \brief Add constraint \c to the constraint queue, but
make sure it occurs AFTER the constraint with index \c cidx.
\remark We use this method to make sure \c c is going to be
processed after the constraint \c cidx. */
unsigned add_cnstr_after(constraint const & c, unsigned cidx) {
cnstr_group g = to_cnstr_group(cidx);
unsigned new_cidx;
if (g == cnstr_group::MaxDelayed) {
// See add_cnstr
new_cidx = cidx + 1;
} else {
new_cidx = m_next_cidx + get_group_first_index(g);
m_next_cidx++;
}
m_cnstrs.insert(cnstr(c, new_cidx));
return new_cidx;
}
/** \brief Check if \c t1 and \c t2 are definitionally equal, if they are not, set a conflict with justification \c j
*/
bool is_def_eq(expr const & t1, expr const & t2, justification const & j) {
@@ -993,7 +1040,7 @@ struct unifier_fn {
if (auto d = is_owned(c)) {
// Metavariable in the constraint is owned by choice constraint.
// So, we postpone this constraint.
add_cnstr(c, to_cnstr_group(*d+1));
add_cnstr_after(c, *d);
return true;
}
@@ -1126,12 +1173,12 @@ struct unifier_fn {
bool preprocess_choice_constraint(constraint c) {
if (!cnstr_on_demand(c)) {
unsigned cidx = add_cnstr(c, get_choice_cnstr_group(c));
if (cnstr_is_owner(c)) {
expr m = get_app_fn(cnstr_expr(c));
lean_assert(is_metavar(m));
m_owned_map.insert(mlocal_name(m), cnstr_delay_factor(c));
m_owned_map.insert(mlocal_name(m), cidx);
}
add_cnstr(c, get_choice_cnstr_group(c));
return true;
} else {
expr m = cnstr_expr(c);
@@ -2621,6 +2668,10 @@ struct unifier_fn {
return Continue;
}
void cut_search() {
m_case_splits.clear();
}
/** \brief Process the next constraint in the constraint queue m_cnstrs */
bool process_next() {
lean_assert(!m_cnstrs.empty());
@@ -2634,7 +2685,11 @@ struct unifier_fn {
postpone(c);
return true;
}
lean_trace("unifier", tout() << "process_next: " << c << "\n";);
if (cidx >= get_group_first_index(cnstr_group::Checkpoint) &&
cidx < get_group_first_index(cnstr_group::FlexFlex)) {
cut_search();
}
lean_trace("unifier", tout() << "process_next: " << cidx << " " << static_cast<unsigned>(to_cnstr_group(cidx)) << " " << c << "\n";);
m_cnstrs.erase_min();
if (is_choice_cnstr(c)) {
return process_choice_constraint(c);
@@ -2684,7 +2739,7 @@ struct unifier_fn {
// Metavariable in the constraint is owned by choice constraint.
// choice constraint was postponed... since c was not modified
// So, we should postpone this one too.
add_cnstr(c, to_cnstr_group(*d+1));
add_cnstr_after(c, *d);
return true;
} else if (is_flex_rigid(c)) {
return process_flex_rigid(c);


@@ -100,13 +100,16 @@ unify_result_seq unify(environment const & env, expr const & lhs, expr const & r
7) Epilogue: constraints that must be solved before FlexFlex are discarded/postponed.
8) FlexFlex: (?m1 ...) =?= (?m2 ...) we don't try to solve this constraint, we delay them and hope the other
8) Checkpoint: unifier performs a prolog-like cut for any constraint in this group.
9) FlexFlex: (?m1 ...) =?= (?m2 ...) we don't try to solve this constraint, we delay them and hope the other
ones instantiate ?m1 or ?m2. If this kind of constraint is the next to be processed in the queue, then
we simply discard it (or save it and return to the caller as residue).
9) MaxDelayed: maximally delayed constraint group
10) MaxDelayed: maximally delayed constraint group
*/
enum class cnstr_group { Basic = 0, FlexRigid, PluginDelayed, DelayedChoice, ClassInstance, Epilogue, FlexFlex, MaxDelayed };
enum class cnstr_group { Basic = 0, FlexRigid, PluginDelayed, DelayedChoice, ClassInstance,
Epilogue, Checkpoint, FlexFlex, MaxDelayed };
inline unsigned to_delay_factor(cnstr_group g) { return static_cast<unsigned>(g); }
class unifier_exception : public exception {