@@ -345,18 +345,6 @@ impl<D: Deps> DepGraphData<D> {
         task: fn(Ctxt, A) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        // If the following assertion triggers, it can have two reasons:
-        // 1. Something is wrong with DepNode creation, either here or
-        //    in `DepGraph::try_mark_green()`.
-        // 2. Two distinct query keys get mapped to the same `DepNode`
-        //    (see for example #48923).
-        assert!(
-            !self.dep_node_exists(&key),
-            "forcing query with already existing `DepNode`\n\
-             - query-key: {arg:?}\n\
-             - dep-node: {key:?}"
-        );
-
         let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
         let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
             (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
@@ -443,7 +431,31 @@ impl<D: Deps> DepGraphData<D> {
                     hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                 };
 
-                self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
+                // The DepNodes generated by the process above are not unique. 2 queries could
+                // have exactly the same dependencies. However, deserialization does not handle
+                // duplicated nodes, so we do the deduplication here directly.
+                //
+                // As anonymous nodes are a small quantity compared to the full dep-graph, the
+                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
+                // us avoid useless growth of the graph with almost-equivalent nodes.
+                match self
+                    .current
+                    .anon_node_to_index
+                    .get_shard_by_value(&target_dep_node)
+                    .lock()
+                    .entry(target_dep_node)
+                {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        let dep_node_index = self.current.intern_new_node(
+                            target_dep_node,
+                            task_deps,
+                            Fingerprint::ZERO,
+                        );
+                        entry.insert(dep_node_index);
+                        dep_node_index
+                    }
+                }
             }
         };
 
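The added arm deduplicates anonymous DepNodes through a sharded map before interning, because (as the patch comment says) deserialization does not handle duplicated nodes. Below is a minimal, self-contained sketch of that shard-and-entry pattern; the `Shards` type, `intern_anon` function, and plain `Mutex<HashMap>` shards are illustrative stand-ins for rustc's `Sharded` map and `intern_new_node`, not the compiler's actual API.

```rust
use std::collections::HashMap;
use std::collections::hash_map::{DefaultHasher, Entry};
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

// Illustrative stand-ins for `DepNode` / `DepNodeIndex`.
type DepNode = u64;
type DepNodeIndex = u32;

// A simplified sharded map: hash the key to pick a shard, lock only that shard.
struct Shards {
    shards: Vec<Mutex<HashMap<DepNode, DepNodeIndex>>>,
}

impl Shards {
    fn new(n: usize) -> Self {
        Shards { shards: (0..n).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    fn shard_for(&self, key: &DepNode) -> &Mutex<HashMap<DepNode, DepNodeIndex>> {
        let mut h = DefaultHasher::new();
        key.hash(&mut h);
        &self.shards[h.finish() as usize % self.shards.len()]
    }
}

/// Deduplicate: if the node was seen before, return its existing index;
/// otherwise intern it once and remember the index for later lookups.
fn intern_anon(
    shards: &Shards,
    node: DepNode,
    intern_new: impl FnOnce(DepNode) -> DepNodeIndex,
) -> DepNodeIndex {
    match shards.shard_for(&node).lock().unwrap().entry(node) {
        Entry::Occupied(e) => *e.get(),
        Entry::Vacant(e) => {
            let idx = intern_new(node);
            e.insert(idx);
            idx
        }
    }
}

fn main() {
    let shards = Shards::new(4);
    let a = intern_anon(&shards, 42, |_| 0);
    let b = intern_anon(&shards, 42, |_| 1); // closure unused: node already interned
    assert_eq!(a, b);
}
```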
@@ -607,20 +619,6 @@ impl<D: Deps> DepGraph<D> {
 }
 
 impl<D: Deps> DepGraphData<D> {
-    #[inline]
-    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
-        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
-        }
-    }
-
-    #[inline]
-    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
-    }
-
     fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.colors.get(prev_index)
@@ -653,11 +651,6 @@ impl<D: Deps> DepGraphData<D> {
 }
 
 impl<D: Deps> DepGraph<D> {
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
-    }
-
     /// Checks whether a previous work product exists for `v` and, if
     /// so, return the path that leads to it. Used to skip doing work.
     pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@@ -1025,24 +1018,24 @@ rustc_index::newtype_index! {
 /// largest in the compiler.
 ///
 /// For this reason, we avoid storing `DepNode`s more than once as map
-/// keys. The `new_node_to_index` map only contains nodes not in the previous
+/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
 /// graph, and we map nodes in the previous graph to indices via a two-step
 /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
 /// and the `prev_index_to_index` vector (which is more compact and faster than
 /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
 ///
-/// This struct uses three locks internally. The `data`, `new_node_to_index`,
+/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
 /// and `prev_index_to_index` fields are locked separately. Operations that take
 /// a `DepNodeIndex` typically just access the `data` field.
 ///
 /// We only need to manipulate at most two locks simultaneously:
-/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
-/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
+/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
+/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<D: Deps> {
     encoder: GraphEncoder<D>,
-    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+    anon_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
 
     /// This is used to verify that fingerprints do not change between the creation of a node
     /// and its recomputation.
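The struct documentation above keeps `DepNode`s out of a second map through a two-step path for previous-session nodes (`DepNode` to `SerializedDepNodeIndex` through the serialized graph, then `SerializedDepNodeIndex` to `DepNodeIndex` through the dense `prev_index_to_index` vector), while `anon_node_to_index` now keys only anonymous nodes; non-anonymous nodes interned this session receive an index from the encoder and are not re-keyed by `DepNode` anywhere, which is why the `dep_node_exists` helpers removed above have nothing left to consult. The sketch below illustrates that layout under those assumptions, with plain `HashMap`/`Vec` standing in for the locked and sharded containers and purely illustrative names.

```rust
use std::collections::HashMap;

// Illustrative aliases; the real types are rustc newtypes and index types.
type DepNode = u64;
type SerializedDepNodeIndex = usize;
type DepNodeIndex = u32;

struct GraphIndices {
    // Previous session: DepNode -> SerializedDepNodeIndex (from the serialized graph).
    prev_node_to_index: HashMap<DepNode, SerializedDepNodeIndex>,
    // Step two: dense vector indexed by SerializedDepNodeIndex, cheaper than a
    // second DepNode-keyed map.
    prev_index_to_index: Vec<Option<DepNodeIndex>>,
    // Current session: only *anonymous* nodes are keyed by DepNode. Non-anonymous
    // new nodes get their DepNodeIndex from the encoder and are not re-keyed here.
    anon_node_to_index: HashMap<DepNode, DepNodeIndex>,
}

impl GraphIndices {
    /// Resolve a node that is either in the previous graph or anonymous.
    fn current_index_of(&self, node: &DepNode) -> Option<DepNodeIndex> {
        if let Some(&prev) = self.prev_node_to_index.get(node) {
            // Two-step path: one map lookup, then a plain vector index.
            self.prev_index_to_index[prev]
        } else {
            self.anon_node_to_index.get(node).copied()
        }
    }
}

fn main() {
    let indices = GraphIndices {
        prev_node_to_index: HashMap::from([(7, 0)]),
        prev_index_to_index: vec![Some(3)],
        anon_node_to_index: HashMap::from([(9, 11)]),
    };
    assert_eq!(indices.current_index_of(&7), Some(3)); // promoted previous node
    assert_eq!(indices.current_index_of(&9), Some(11)); // anonymous node
}
```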
@@ -1110,7 +1103,7 @@ impl<D: Deps> CurrentDepGraph<D> {
                 profiler,
                 previous,
             ),
-            new_node_to_index: Sharded::new(|| {
+            anon_node_to_index: Sharded::new(|| {
                 FxHashMap::with_capacity_and_hasher(
                     new_node_count_estimate / sharded::shards(),
                     Default::default(),
@@ -1145,14 +1138,7 @@ impl<D: Deps> CurrentDepGraph<D> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };
+        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
 
         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
@@ -1222,8 +1208,6 @@ impl<D: Deps> CurrentDepGraph<D> {
         prev_graph: &SerializedDepGraph,
         prev_index: SerializedDepNodeIndex,
     ) -> DepNodeIndex {
-        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
-
         let mut prev_index_to_index = self.prev_index_to_index.lock();
 
         match prev_index_to_index[prev_index] {
@@ -1241,19 +1225,6 @@ impl<D: Deps> CurrentDepGraph<D> {
             }
         }
     }
-
-    #[inline]
-    fn debug_assert_not_in_new_nodes(
-        &self,
-        prev_graph: &SerializedDepGraph,
-        prev_index: SerializedDepNodeIndex,
-    ) {
-        let node = &prev_graph.index_to_node(prev_index);
-        debug_assert!(
-            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
-            "node from previous graph present in new node collection"
-        );
-    }
 }
 
 #[derive(Debug, Clone, Copy)]
@@ -1375,7 +1346,7 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN
 
     if dep_node.is_none() {
         // Try to find it among the new nodes
-        for shard in data.current.new_node_to_index.lock_shards() {
+        for shard in data.current.anon_node_to_index.lock_shards() {
             if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
                 dep_node = Some(*node);
                 break;