
Commit b738b06

update cache
1 parent aaa9bb9 commit b738b06

4 files changed: 91 additions & 112 deletions


Cargo.lock

Lines changed: 1 addition & 0 deletions
@@ -4783,6 +4783,7 @@ dependencies = [
  "rustc_middle",
  "rustc_parse_format",
  "rustc_query_system",
+ "rustc_serialize",
  "rustc_session",
  "rustc_span",
  "rustc_target",

compiler/rustc_trait_selection/Cargo.toml

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ rustc_infer = { path = "../rustc_infer" }
 rustc_lint_defs = { path = "../rustc_lint_defs" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_query_system = { path = "../rustc_query_system" }
+rustc_serialize = { path = "../rustc_serialize" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 rustc_target = { path = "../rustc_target" }

compiler/rustc_trait_selection/src/lib.rs

Lines changed: 1 addition & 0 deletions
@@ -21,6 +21,7 @@
 #![feature(never_type)]
 #![feature(result_option_inspect)]
 #![feature(type_alias_impl_trait)]
+#![feature(min_specialization)]
 #![recursion_limit = "512"] // For rustdoc
 
 #[macro_use]

compiler/rustc_trait_selection/src/solve/cache.rs

Lines changed: 88 additions & 112 deletions
@@ -11,84 +11,60 @@
 use super::overflow::OverflowData;
 use super::{CanonicalGoal, Certainty, MaybeCause, Response};
 use super::{EvalCtxt, QueryResult};
-
 use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
 use rustc_infer::infer::canonical::{Canonical, CanonicalVarKind, CanonicalVarValues};
 use rustc_middle::ty::{self, TyCtxt};
-use std::{cmp::Ordering, collections::hash_map::Entry};
+use std::collections::hash_map::Entry;
+
+rustc_index::newtype_index! {
+    pub struct StackDepth {}
+}
+rustc_index::newtype_index! {
+    pub struct EntryIndex {}
+}
 
 #[derive(Debug, Clone)]
 struct ProvisionalEntry<'tcx> {
     // In case we have a coinductive cycle, this is the
     // the currently least restrictive result of this goal.
     response: QueryResult<'tcx>,
-    // The lowest element on the stack on which this result
-    // relies on. Starts out as just being the depth at which
-    // we've proven this obligation, but gets lowered to the
-    // depth of another goal if we rely on it in a cycle.
-    depth: usize,
+    // In case of a cycle, the depth of lowest stack entry involved
+    // in that cycle. This is monotonically decreasing in the stack as all
+    // elements between the current stack element in the lowest stack entry
+    // involved have to also be involved in that cycle.
+    //
+    // We can only move entries to the global cache once we're complete done
+    // with the cycle. If this entry has not been involved in a cycle,
+    // this is just its own depth.
+    depth: StackDepth,
+
+    // The goal for this entry. Should always be equal to the corresponding goal
+    // in the lookup table.
+    goal: CanonicalGoal<'tcx>,
 }
 
 struct StackElem<'tcx> {
     goal: CanonicalGoal<'tcx>,
     has_been_used: bool,
 }
 
-/// The cache used for goals which are currently in progress or which depend
-/// on in progress results.
-///
-/// Once we're done with a goal we can store it in the global trait solver
-/// cache of the `TyCtxt`. For goals which we're currently proving, or which
-/// have only been proven via a coinductive cycle using a goal still on our stack
-/// we have to use this separate data structure.
-///
-/// The current data structure is not perfect, so there may still be room for
-/// improvement here. We have the following requirements:
-///
-/// ## Is there is a provisional entry for the given goal:
-///
-/// ```ignore (for syntax highlighting)
-/// self.entries.get(goal)
-/// ```
-///
-/// ## Get all goals on the stack involved in a cycle:
-///
-/// ```ignore (for syntax highlighting)
-/// let entry = self.entries.get(goal).unwrap();
-/// let involved_goals = self.stack.iter().skip(entry.depth);
-/// ```
-///
-/// ## Capping the depth of all entries
-///
-/// Needed whenever we encounter a cycle. The current implementation always
-/// iterates over all entries instead of only the ones with a larger depth.
-/// Changing this may result in notable performance improvements.
-///
-/// ```ignore (for syntax highlighting)
-/// let cycle_depth = self.entries.get(goal).unwrap().depth;
-/// for e in &mut self.entries {
-///     e.depth = e.depth.min(cycle_depth);
-/// }
-/// ```
-///
-/// ## Checking whether we have to rerun the current goal
-///
-/// A goal has to be rerun if its provisional result was used in a cycle
-/// and that result is different from its final result. We update
-/// [StackElem::has_been_used] for the deepest stack element involved in a cycle.
-///
-/// ## Moving all finished goals into the global cache
-///
-/// If `stack_elem.has_been_used` is true, iterate over all entries, moving the ones
-/// with equal depth. If not, simply move this single entry.
 pub(super) struct ProvisionalCache<'tcx> {
-    stack: Vec<StackElem<'tcx>>,
-    entries: FxHashMap<CanonicalGoal<'tcx>, ProvisionalEntry<'tcx>>,
+    stack: IndexVec<StackDepth, StackElem<'tcx>>,
+    entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>,
+    // FIXME: This is only used to quickly check whether a given goal
+    // is in the cache. We should experiment with using something like
+    // `SsoHashSet` here because in most cases there are only a few entries.
+    lookup_table: FxHashMap<CanonicalGoal<'tcx>, EntryIndex>,
 }
 
 impl<'tcx> ProvisionalCache<'tcx> {
     pub(super) fn empty() -> ProvisionalCache<'tcx> {
-        ProvisionalCache { stack: Vec::new(), entries: Default::default() }
+        ProvisionalCache {
+            stack: Default::default(),
+            entries: Default::default(),
+            lookup_table: Default::default(),
+        }
     }
 
     pub(super) fn current_depth(&self) -> usize {
@@ -108,18 +84,17 @@ impl<'tcx> EvalCtxt<'tcx> {
 
         // Look at the provisional cache to check for cycles.
         let cache = &mut self.provisional_cache;
-        match cache.entries.entry(goal) {
+        match cache.lookup_table.entry(goal) {
             // No entry, simply push this goal on the stack after dealing with overflow.
             Entry::Vacant(v) => {
                 if self.overflow_data.has_overflow(cache.stack.len()) {
                     return Err(self.deal_with_overflow(goal));
                 }
 
-                v.insert(ProvisionalEntry {
-                    response: response_no_constraints(self.tcx, goal, Certainty::Yes),
-                    depth: cache.stack.len(),
-                });
-                cache.stack.push(StackElem { goal, has_been_used: false });
+                let depth = cache.stack.push(StackElem { goal, has_been_used: false });
+                let response = response_no_constraints(self.tcx, goal, Certainty::Yes);
+                let entry_index = cache.entries.push(ProvisionalEntry { response, depth, goal });
+                v.insert(entry_index);
                 Ok(())
             }
             // We have a nested goal which relies on a goal `root` deeper in the stack.
@@ -131,21 +106,22 @@ impl<'tcx> EvalCtxt<'tcx> {
             //
             // Finally we can return either the provisional response for that goal if we have a
             // coinductive cycle or an ambiguous result if the cycle is inductive.
-            Entry::Occupied(entry) => {
-                // FIXME: `ProvisionalEntry` should be `Copy`.
-                let entry = entry.get().clone();
+            Entry::Occupied(entry_index) => {
+                let entry_index = *entry_index.get();
+                // FIXME `ProvisionalEntry` should be `Copy`.
+                let entry = cache.entries.get(entry_index).unwrap().clone();
                 cache.stack[entry.depth].has_been_used = true;
-                for provisional_entry in cache.entries.values_mut() {
+                for provisional_entry in cache.entries.iter_mut().skip(entry_index.index()) {
                     provisional_entry.depth = provisional_entry.depth.min(entry.depth);
                 }
 
                 // NOTE: The goals on the stack aren't the only goals involved in this cycle.
                 // We can also depend on goals which aren't part of the stack but coinductively
                 // depend on the stack themselves. We already checked whether all the goals
                 // between these goals and their root on the stack. This means that as long as
-                // each goal in a cycle is checked for coinductivity by itself simply checking
+                // each goal in a cycle is checked for coinductivity by itself, simply checking
                 // the stack is enough.
-                if cache.stack[entry.depth..]
+                if cache.stack.raw[entry.depth.index()..]
                     .iter()
                     .all(|g| g.goal.value.predicate.is_coinductive(self.tcx))
                 {
@@ -154,7 +130,7 @@ impl<'tcx> EvalCtxt<'tcx> {
                     Err(response_no_constraints(
                         self.tcx,
                         goal,
-                        Certainty::Maybe(MaybeCause::Ambiguity),
+                        Certainty::Maybe(MaybeCause::Overflow),
                     ))
                 }
             }
@@ -182,57 +158,57 @@
         let StackElem { goal, has_been_used } = cache.stack.pop().unwrap();
         assert_eq!(goal, actual_goal);
 
-        let provisional_entry = cache.entries.get_mut(&goal).unwrap();
-        // Check whether the current stack entry is the root of a cycle.
-        //
-        // If so, we either move all participants of that cycle to the global cache
-        // or, in case the provisional response used in the cycle is not equal to the
-        // final response, have to recompute the goal after updating the provisional
-        // response to the final response of this iteration.
-        if has_been_used {
-            if provisional_entry.response == response {
-                // We simply drop all entries according to an immutable condition, so
-                // query instability is not a concern here.
-                #[allow(rustc::potential_query_instability)]
-                cache.entries.retain(|goal, entry| match entry.depth.cmp(&cache.stack.len()) {
-                    Ordering::Less => true,
-                    Ordering::Equal => {
-                        Self::try_move_finished_goal_to_global_cache(
-                            self.tcx,
-                            &mut self.overflow_data,
-                            &cache.stack,
-                            // FIXME: these should be `Copy` :(
-                            goal.clone(),
-                            entry.response.clone(),
-                        );
-                        false
-                    }
-                    Ordering::Greater => bug!("entry with greater depth than the current leaf"),
-                });
+        let provisional_entry_index = *cache.lookup_table.get(&goal).unwrap();
+        let provisional_entry = &mut cache.entries[provisional_entry_index];
+        // Was the current goal the root of a cycle and was the provisional response
+        // different from the final one.
+        if has_been_used && provisional_entry.response != response {
+            // If so, update the provisional reponse for this goal...
+            provisional_entry.response = response;
+            // ...remove all entries whose result depends on this goal
+            // from the provisional cache...
+            //
+            // That's not completely correct, as a nested goal can also
+            // depend on a goal which is lower in the stack so it doesn't
+            // actually depend on the current goal. This should be fairly
+            // rare and is hopefully not relevant for performance.
+            #[allow(rustc::potential_query_instability)]
+            cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
+            cache.entries.truncate(provisional_entry_index.index() + 1);
 
-                true
-            } else {
-                provisional_entry.response = response;
-                cache.stack.push(StackElem { goal, has_been_used: false });
-                false
-            }
+            // ...and finally push our goal back on the stack and reevaluate it.
+            cache.stack.push(StackElem { goal, has_been_used: false });
+            false
         } else {
-            Self::try_move_finished_goal_to_global_cache(
-                self.tcx,
-                &mut self.overflow_data,
-                &cache.stack,
-                goal,
-                response,
-            );
-            cache.entries.remove(&goal);
+            // If not, we're done with this goal.
+            //
+            // Check whether that this goal doesn't depend on a goal deeper on the stack
+            // and if so, move it and all nested goals to the global cache.
+            //
+            // Note that if any nested goal were to depend on something deeper on the stack,
+            // this would have also updated the depth of this goal.
+            if provisional_entry.depth == cache.stack.next_index() {
+                for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..)
+                {
+                    let actual_index = cache.lookup_table.remove(&entry.goal);
+                    debug_assert_eq!(Some(i), actual_index);
+                    Self::try_move_finished_goal_to_global_cache(
+                        self.tcx,
+                        &mut self.overflow_data,
+                        &cache.stack,
+                        entry.goal,
+                        entry.response,
+                    );
+                }
+            }
             true
         }
     }
 
     fn try_move_finished_goal_to_global_cache(
         tcx: TyCtxt<'tcx>,
         overflow_data: &mut OverflowData,
-        stack: &[StackElem<'tcx>],
+        stack: &IndexVec<StackDepth, StackElem<'tcx>>,
         goal: CanonicalGoal<'tcx>,
         response: QueryResult<'tcx>,
     ) {
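
The core change in cache.rs is replacing the single `FxHashMap<CanonicalGoal, ProvisionalEntry>` with an `IndexVec` of entries addressed by strongly typed indices from `rustc_index::newtype_index!`, plus a `lookup_table` mapping goals to those indices; the `rustc_serialize` dependency and `min_specialization` feature added above are presumably needed by the code `newtype_index!` expands to. Below is a minimal sketch of that typed-index pattern, not part of the commit. It assumes a nightly toolchain with the rustc-internal `rustc_index`, `rustc_data_structures`, and `rustc_serialize` crates available as dependencies; `CacheEntry` and `ProvisionalCacheSketch` are hypothetical stand-ins for `ProvisionalEntry` and `ProvisionalCache`.

// Hypothetical sketch (not rustc code) of the newtype_index!/IndexVec pattern.
#![feature(min_specialization)] // required by the code `newtype_index!` expands to

use rustc_data_structures::fx::FxHashMap;
use rustc_index::vec::IndexVec;

rustc_index::newtype_index! {
    // A strongly typed index into `entries`; it cannot be confused with a
    // plain `usize` or with an index into a different `IndexVec`.
    pub struct EntryIndex {}
}

pub struct CacheEntry {
    pub response: u32, // placeholder payload
}

#[derive(Default)]
pub struct ProvisionalCacheSketch {
    // Entries live in insertion order and are addressed by `EntryIndex`...
    entries: IndexVec<EntryIndex, CacheEntry>,
    // ...while the lookup table only maps a key to its index, mirroring the
    // commit's `lookup_table: FxHashMap<CanonicalGoal<'tcx>, EntryIndex>`.
    lookup_table: FxHashMap<&'static str, EntryIndex>,
}

impl ProvisionalCacheSketch {
    pub fn insert(&mut self, key: &'static str, entry: CacheEntry) -> EntryIndex {
        // `IndexVec::push` returns the typed index of the newly added element.
        let index = self.entries.push(entry);
        self.lookup_table.insert(key, index);
        index
    }

    pub fn truncate_after(&mut self, index: EntryIndex) {
        // Drop every entry added after `index`, the same shape as the commit's
        // cycle handling (`lookup_table.retain` followed by `entries.truncate`).
        self.lookup_table.retain(|_key, i| *i <= index);
        self.entries.truncate(index.index() + 1);
    }
}

The commit itself additionally relies on `IndexVec::push` returning the new index, `next_index`, `drain_enumerated`, and raw slice access via `stack.raw[..]`, which replace the `usize` depths and the `Ordering`-based `retain` logic of the old `FxHashMap`-keyed cache.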
