forked from rust-lang/rust
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathprev.rs
100 lines (88 loc) · 3.94 KB
/
prev.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepKind, DepNode};
use crate::dep_graph::DepContext;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_span::def_id::DefPathHash;
/// The dependency graph produced by the *previous* compilation session,
/// deserialized from the incremental-compilation cache. Queried during the
/// current session to decide what can be reused.
#[derive(Debug, Encodable, Decodable)]
pub struct PreviousDepGraph<K: DepKind> {
    // The deserialized graph data (nodes, edges, fingerprints).
    data: SerializedDepGraph<K>,
    // Reverse map from a `DepNode` to its index into `data.nodes`;
    // built eagerly in `new` so node -> index lookups are O(1).
    index: FxHashMap<DepNode<K>, SerializedDepNodeIndex>,
}
impl<K: DepKind> Default for PreviousDepGraph<K> {
fn default() -> Self {
PreviousDepGraph { data: Default::default(), index: Default::default() }
}
}
impl<K: DepKind> PreviousDepGraph<K> {
pub fn new(data: SerializedDepGraph<K>) -> PreviousDepGraph<K> {
let index: FxHashMap<_, _> =
data.nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
PreviousDepGraph { data, index }
}
#[inline]
pub fn edge_targets_from(
&self,
dep_node_index: SerializedDepNodeIndex,
) -> &[SerializedDepNodeIndex] {
self.data.edge_targets_from(dep_node_index)
}
#[inline]
pub fn index_to_node<CTX: DepContext<DepKind = K>>(
&self,
dep_node_index: SerializedDepNodeIndex,
tcx: CTX,
) -> DepNode<K> {
let dep_node = self.data.nodes[dep_node_index];
// We have just loaded a deserialized `DepNode` from the previous
// compilation session into the current one. If this was a foreign `DefId`,
// then we stored additional information in the incr comp cache when we
// initially created its fingerprint (see `DepNodeParams::to_fingerprint`)
// We won't be calling `to_fingerprint` again for this `DepNode` (we no longer
// have the original value), so we need to copy over this additional information
// from the old incremental cache into the new cache that we serialize
// and the end of this compilation session.
if dep_node.kind.can_reconstruct_query_key() {
tcx.register_reused_dep_path_hash(DefPathHash(dep_node.hash.into()));
}
dep_node
}
/// When debug assertions are enabled, asserts that the dep node at `dep_node_index` is equal to `dep_node`.
/// This method should be preferred over manually calling `index_to_node`.
/// Calls to `index_to_node` may affect global state, so gating a call
/// to `index_to_node` on debug assertions could cause behavior changes when debug assertions
/// are enabled.
#[inline]
pub fn debug_assert_eq(&self, dep_node_index: SerializedDepNodeIndex, dep_node: DepNode<K>) {
debug_assert_eq!(self.data.nodes[dep_node_index], dep_node);
}
/// Obtains a debug-printable version of the `DepNode`.
/// See `debug_assert_eq` for why this should be preferred over manually
/// calling `dep_node_index`
pub fn debug_dep_node(&self, dep_node_index: SerializedDepNodeIndex) -> impl std::fmt::Debug {
// We're returning the `DepNode` without calling `register_reused_dep_path_hash`,
// but `impl Debug` return type means that it can only be used for debug printing.
// So, there's no risk of calls trying to create new dep nodes that have this
// node as a dependency
self.data.nodes[dep_node_index]
}
#[inline]
pub fn node_to_index(&self, dep_node: &DepNode<K>) -> SerializedDepNodeIndex {
self.index[dep_node]
}
#[inline]
pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> {
self.index.get(dep_node).cloned()
}
#[inline]
pub fn fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
self.index.get(dep_node).map(|&node_index| self.data.fingerprints[node_index])
}
#[inline]
pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
self.data.fingerprints[dep_node_index]
}
pub fn node_count(&self) -> usize {
self.index.len()
}
}