fix(kernel/expr,kernel/level): performance problem
The method std::unordered_set::clear takes O(n) time, where n is the number of buckets in the hash table used to implement the set. Moreover, it does not reduce the number of buckets to zero; the set keeps its n empty buckets. This creates performance problems if the number of buckets has grown very large at some point. The test tst6 in tests/kernel/expr.cpp demonstrates the problem.
This commit is contained in:
parent d07bbe7e8c
commit cb2f93ee6a
2 changed files with 7 additions and 5 deletions
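The following is a minimal standalone sketch, not part of this commit, of the behavior described in the message: on common implementations such as libstdc++, clear() keeps the bucket array allocated, while swapping with a freshly constructed set releases it. The element type and count are placeholders chosen only for illustration.

    #include <iostream>
    #include <unordered_set>

    int main() {
        std::unordered_set<int> s;
        for (int i = 0; i < 1000000; i++)
            s.insert(i);
        std::cout << "buckets after inserts: " << s.bucket_count() << "\n";

        // clear() removes the elements but, on typical implementations,
        // keeps the (now huge) bucket array allocated.
        s.clear();
        std::cout << "buckets after clear(): " << s.bucket_count() << "\n";

        // Swapping with a default-constructed set hands the huge table to
        // `fresh`, which frees it when it goes out of scope.
        std::unordered_set<int> fresh;
        s.swap(fresh);
        std::cout << "buckets after swap():  " << s.bucket_count() << "\n";
        return 0;
    }

On a typical libstdc++ build the first two counts stay above one million while the last drops back to the default for an empty set, which is why the patch below swaps instead of clearing.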
kernel/expr.cpp
@@ -312,7 +312,8 @@ bool enable_expr_caching(bool f) {
     bool r2 = g_expr_cache_enabled;
     lean_verify(r1 == r2);
     g_expr_cache_enabled = f;
-    get_expr_cache().clear();
+    expr_cache new_cache;
+    get_expr_cache().swap(new_cache);
     return r2;
 }
 inline expr cache(expr const & e) {

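The two added lines are the usual swap-with-a-fresh-container idiom. As a hedged illustration only, here is the same idiom written as a tiny generic helper; the name reset_to_fresh is hypothetical and does not exist in the Lean codebase:

    // Hypothetical helper, shown only to name the idiom used above: the
    // old bucket array moves into `fresh` and is freed when `fresh` is
    // destroyed at the end of the call.
    template <typename Container>
    void reset_to_fresh(Container & c) {
        Container fresh;
        c.swap(fresh);
    }

With such a helper, the change above would read reset_to_fresh(get_expr_cache());.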
kernel/level.cpp
@@ -261,20 +261,21 @@ level const & mk_level_zero() { return *g_level_zero; }
 level const & mk_level_one() { return *g_level_one; }
 bool is_one(level const & l) { return l == mk_level_one(); }
 
-typedef typename std::unordered_set<level, level_hash> level_table;
+typedef typename std::unordered_set<level, level_hash> level_cache;
 LEAN_THREAD_VALUE(bool, g_level_cache_enabled, true);
-MK_THREAD_LOCAL_GET_DEF(level_table, get_level_cache);
+MK_THREAD_LOCAL_GET_DEF(level_cache, get_level_cache);
 bool enable_level_caching(bool f) {
     bool r = g_level_cache_enabled;
     g_level_cache_enabled = f;
-    get_level_cache().clear();
+    level_cache new_cache;
+    get_level_cache().swap(new_cache);
     get_level_cache().insert(mk_level_zero());
     get_level_cache().insert(mk_level_one());
     return r;
 }
 level cache(level const & e) {
     if (g_level_cache_enabled) {
-        level_table & cache = get_level_cache();
+        level_cache & cache = get_level_cache();
         auto it = cache.find(e);
         if (it != cache.end()) {
             return *it;
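A hedged usage sketch of the patched enable_level_caching; the call sites below are invented for illustration. After the swap, the thread-local table is rebuilt from scratch and immediately re-seeded with the constant levels zero and one, so the cache always contains them, as before the patch.

    // Illustration only; the surrounding code is an assumption.
    bool old = enable_level_caching(false); // reset the thread-local table, get the previous flag
    // ... work that should not populate the level cache ...
    enable_level_caching(old);              // restore; the table starts small again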