@@ -15,6 +15,7 @@ use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::profiling::TimingGuard;
 #[cfg(parallel_compiler)]
 use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
 use rustc_session::Session;
@@ -188,12 +189,12 @@ where
         #[cfg(not(parallel_compiler))]
         let mut state_lock = state.active.lock();
         let lock = &mut *state_lock;
+        let current_job_id = qcx.current_query_job();

         match lock.entry(key) {
             Entry::Vacant(entry) => {
                 let id = qcx.next_job_id();
-                let job = qcx.current_query_job();
-                let job = QueryJob::new(id, span, job);
+                let job = QueryJob::new(id, span, current_job_id);

                 let key = *entry.key();
                 entry.insert(QueryResult::Started(job));
@@ -212,7 +213,7 @@ where
                         // so we just return the error.
                         return TryGetJob::Cycle(id.find_cycle_in_stack(
                             qcx.try_collect_active_jobs().unwrap(),
-                            &qcx.current_query_job(),
+                            &current_job_id,
                             span,
                         ));
                     }
@@ -230,7 +231,7 @@ where

                         // With parallel queries we might just have to wait on some other
                         // thread.
-                        let result = latch.wait_on(qcx.current_query_job(), span);
+                        let result = latch.wait_on(current_job_id, span);

                         match result {
                             Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
@@ -346,10 +347,9 @@ where
     }
 }

+#[inline(never)]
 fn try_execute_query<Q, Qcx>(
     qcx: Qcx,
-    state: &QueryState<Q::Key, Qcx::DepKind>,
-    cache: &Q::Cache,
     span: Span,
     key: Q::Key,
     dep_node: Option<DepNode<Qcx::DepKind>>,
@@ -358,9 +358,11 @@ where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
+    let state = Q::query_state(qcx);
     match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
         TryGetJob::NotYetStarted(job) => {
             let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
+            let cache = Q::query_cache(qcx);
             if Q::FEEDABLE {
                 // We should not compute queries that also got a value via feeding.
                 // This can't happen, as query feeding adds the very dependencies to the fed query
@@ -381,7 +383,7 @@ where
         }
         #[cfg(parallel_compiler)]
         TryGetJob::JobCompleted(query_blocked_prof_timer) => {
-            let Some((v, index)) = cache.lookup(&key) else {
+            let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
                 panic!("value must be in cache after waiting")
             };

@@ -393,6 +395,7 @@ where
     }
 }

+#[inline(always)]
 fn execute_job<Q, Qcx>(
     qcx: Qcx,
     key: Q::Key,
@@ -478,6 +481,7 @@ where
     (result, dep_node_index)
 }

+#[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
     qcx: Qcx,
     key: &Q::Key,
@@ -568,6 +572,7 @@ where
     Some((result, dep_node_index))
 }

+#[inline]
 #[instrument(skip(tcx, result, hash_result), level = "debug")]
 pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
     tcx: Tcx,
@@ -722,6 +727,7 @@ pub enum QueryMode {
     Ensure,
 }

+#[inline(always)]
 pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
 where
     D: DepKind,
@@ -739,14 +745,8 @@ where
         None
     };

-    let (result, dep_node_index) = try_execute_query::<Q, Qcx>(
-        qcx,
-        Q::query_state(qcx),
-        Q::query_cache(qcx),
-        span,
-        key,
-        dep_node,
-    );
+    let (result, dep_node_index) =
+        ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
     if let Some(dep_node_index) = dep_node_index {
         qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
@@ -762,14 +762,12 @@ where
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    let cache = Q::query_cache(qcx);
-    if let Some((_, index)) = cache.lookup(&key) {
+    if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }

-    let state = Q::query_state(qcx);
     debug_assert!(!Q::ANON);

-    try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node));
+    ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
 }
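
For context on the new `ensure_sufficient_stack` wrapper around `try_execute_query`: it runs the closure on a dynamically grown stack segment when the remaining stack is low, so deep query recursion no longer depends on a large fixed thread stack. Below is a minimal, self-contained sketch of that pattern built directly on the `stacker` crate (the mechanism `rustc_data_structures::stack` is built on); the constants and the `walk` function are illustrative placeholders, not the compiler's actual values or call sites.

```rust
// Minimal sketch of the ensure_sufficient_stack pattern using the `stacker`
// crate; the constants and `walk` below are illustrative, not the compiler's
// actual values or call sites.
const RED_ZONE: usize = 100 * 1024; // grow once less than ~100 KiB remains
const STACK_PER_RECURSION: usize = 1024 * 1024; // allocate 1 MiB segments

fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
    // `maybe_grow` checks the remaining stack; if it is inside the red zone,
    // it runs `f` on a freshly allocated stack segment, otherwise it calls
    // `f` directly on the current stack.
    stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
}

// Hypothetical deeply recursive computation that would otherwise overflow.
fn walk(depth: u64) -> u64 {
    if depth == 0 { 0 } else { ensure_sufficient_stack(|| 1 + walk(depth - 1)) }
}

fn main() {
    // Survives recursion depths that would exhaust a fixed-size thread stack.
    println!("{}", walk(100_000));
}
```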