@@ -131,6 +131,9 @@ pub struct Scope<'tcx> {
 
     /// The cache for drop chain on "generator drop" exit.
     cached_generator_drop: Option<BasicBlock>,
+
+    /// The cache for drop chain on "unwind" exit.
+    cached_unwind: CachedBlock,
 }
 
 #[derive(Debug)]
@@ -154,6 +157,11 @@ struct CachedBlock {
     unwind: Option<BasicBlock>,
 
     /// The cached block for unwinds during cleanups-on-generator-drop path
+    ///
+    /// This is split from the standard unwind path here to prevent drop
+    /// elaboration from creating drop flags that would have to be captured
+    /// by the generator. I'm not sure how important this optimization is,
+    /// but it is here.
    generator_drop: Option<BasicBlock>,
 }
 
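Note: the `CachedBlock` methods this patch relies on (`invalidate`, `get`, `ref_mut`, plus the `Default` impl) are not shown in the hunks. Below is a minimal sketch of how they are presumably shaped, inferred purely from the call sites in this diff; it assumes `BasicBlock` from `rustc::mir` is in scope and is not part of the patch itself:

```rust
// Sketch inferred from this patch's call sites; the real bodies live
// elsewhere in scope.rs and are not part of this diff.
#[derive(Debug, Default, Clone, Copy)]
struct CachedBlock {
    unwind: Option<BasicBlock>,
    generator_drop: Option<BasicBlock>,
}

impl CachedBlock {
    // Clear both cached entry points (used by invalidate_cache).
    fn invalidate(&mut self) {
        self.generator_drop = None;
        self.unwind = None;
    }

    // Pick the cached block for the requested exit kind.
    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    // Mutable slot for the requested exit kind; build_diverge_scope
    // uses this to fill the cache on a miss.
    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}
```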
@@ -217,34 +225,29 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `unwind` controls whether caches for the unwind branch are also invalidated.
-    fn invalidate_cache(&mut self, unwind: bool) {
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
+    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
+    /// top-of-scope (as opposed to dependent scopes).
+    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
+        // with lots of `try!`?
+
+        // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();
-        if !unwind { return; }
-        for dropdata in &mut self.drops {
-            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
-                cached_block.invalidate();
-            }
+
+        if !storage_only {
+            // the current generator drop and unwind ignore
+            // storage but refer to top-of-scope
+            self.cached_generator_drop = None;
+            self.cached_unwind.invalidate();
         }
-    }
 
-    /// Returns the cached entrypoint for diverging exit from this scope.
-    ///
-    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
-    /// this method to work correctly.
-    fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
-        let mut drops = self.drops.iter().rev().filter_map(|data| {
-            match data.kind {
-                DropKind::Value { cached_block } => {
-                    Some(cached_block.get(generator_drop))
+        if !storage_only && !this_scope_only {
+            for dropdata in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+                    cached_block.invalidate();
                 }
-                DropKind::Storage => None
             }
-        });
-        if let Some(cached_block) = drops.next() {
-            Some(cached_block.expect("drop cache is not filled"))
-        } else {
-            None
         }
     }
 
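For orientation, the two new flags widen invalidation in steps rather than acting independently. The following self-contained restatement of which caches each flag combination clears matches the new body above; the enum and function are illustrative only, not part of the patch:

```rust
// Illustrative restatement of the new invalidate_cache branching.
#[derive(Debug, PartialEq)]
enum Cleared {
    ExitsOnly,          // cached_exits
    ExitsAndTopOfScope, // + cached_generator_drop, cached_unwind
    Everything,         // + every per-drop cached_block
}

fn what_gets_cleared(storage_only: bool, this_scope_only: bool) -> Cleared {
    if storage_only {
        Cleared::ExitsOnly
    } else if this_scope_only {
        Cleared::ExitsAndTopOfScope
    } else {
        Cleared::Everything
    }
}

fn main() {
    assert_eq!(what_gets_cleared(true, true), Cleared::ExitsOnly);
    assert_eq!(what_gets_cleared(false, true), Cleared::ExitsAndTopOfScope);
    assert_eq!(what_gets_cleared(false, false), Cleared::Everything);
}
```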
@@ -356,7 +359,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             needs_cleanup: false,
             drops: vec![],
             cached_generator_drop: None,
-            cached_exits: FxHashMap()
+            cached_exits: FxHashMap(),
+            cached_unwind: CachedBlock::default(),
         });
     }
 
@@ -482,15 +486,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                                    TerminatorKind::Goto { target: b });
                 b
             };
+
+            // End all regions for scopes out of which we are breaking.
+            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
+
             unpack!(block = build_scope_drops(&mut self.cfg,
                                               scope,
                                               rest,
                                               block,
                                               self.arg_count,
                                               true));
-
-            // End all regions for scopes out of which we are breaking.
-            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
         }
 
         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
@@ -672,8 +677,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             // invalidating caches of each scope visited. This way bare minimum of the
             // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
             // cache of outer scope stays intact.
-            let invalidate_unwind = needs_drop && !this_scope;
-            scope.invalidate_cache(invalidate_unwind);
+            scope.invalidate_cache(!needs_drop, this_scope);
             if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
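This rewritten call site preserves the old behavior for the per-drop caches: the old code invalidated them when `needs_drop && !this_scope`, and the new body's per-drop loop is guarded by `!storage_only && !this_scope_only`, which is the same condition once `storage_only = !needs_drop` and `this_scope_only = this_scope` are substituted. The intentional change is that `cached_generator_drop` and the new `cached_unwind` are now also cleared whenever `needs_drop` holds. A quick runnable check of the equivalence:

```rust
// Exhaustive check that the old and new invalidation conditions agree
// for the per-drop cached_block loop.
fn main() {
    for &needs_drop in &[false, true] {
        for &this_scope in &[false, true] {
            let old = needs_drop && !this_scope; // old `invalidate_unwind`
            let (storage_only, this_scope_only) = (!needs_drop, this_scope);
            let new = !storage_only && !this_scope_only; // guards the new loop
            assert_eq!(old, new);
        }
    }
}
```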
@@ -819,30 +823,50 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                            generator_drop: bool)
                            -> BlockAnd<()> {
     debug!("build_scope_drops({:?} -> {:?})", block, scope);
-    let mut iter = scope.drops.iter().rev().peekable();
+    let mut iter = scope.drops.iter().rev();
     while let Some(drop_data) = iter.next() {
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                // Try to find the next block with its cached block
-                // for us to diverge into in case the drop panics.
-                let on_diverge = iter.peek().iter().filter_map(|dd| {
+                // Try to find the next block with its cached block for us to
+                // diverge into, either a previous block in this current scope or
+                // the top of the previous scope.
+                //
+                // If it wasn't for EndRegion, we could just chain all the DropData
+                // together and pick the first DropKind::Value. Please do that
+                // when we replace EndRegion with NLL.
+                let on_diverge = iter.clone().filter_map(|dd| {
                     match dd.kind {
-                        DropKind::Value { cached_block } => {
-                            let result = cached_block.get(generator_drop);
-                            if result.is_none() {
-                                span_bug!(drop_data.span, "cached block not present?")
-                            }
-                            result
-                        },
+                        DropKind::Value { cached_block } => Some(cached_block),
                         DropKind::Storage => None
                     }
-                }).next();
-                // If there’s no `cached_block`s within current scope,
-                // we must look for one in the enclosing scope.
-                let on_diverge = on_diverge.or_else(|| {
-                    earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
+                }).next().or_else(|| {
+                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
+                        // If *any* scope requires cleanup code to be run,
+                        // we must use the cached unwind from the *topmost*
+                        // scope, to ensure all EndRegions from surrounding
+                        // scopes are executed before the drop code runs.
+                        Some(earlier_scopes.last().unwrap().cached_unwind)
+                    } else {
+                        // We don't need any further cleanup, so return None
+                        // to avoid creating a landing pad. We can skip
+                        // EndRegions because all local regions end anyway
+                        // when the function unwinds.
+                        //
+                        // This is an important optimization because LLVM is
+                        // terrible at optimizing landing pads. FIXME: I think
+                        // it would be cleaner and better to do this optimization
+                        // in SimplifyCfg instead of here.
+                        None
+                    }
+                });
+
+                let on_diverge = on_diverge.map(|cached_block| {
+                    cached_block.get(generator_drop).unwrap_or_else(|| {
+                        span_bug!(drop_data.span, "cached block not present?")
+                    })
                 });
+
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
                     location: drop_data.location.clone(),
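The new `on_diverge` selection is a two-stage lookup: first the nearest cached `DropKind::Value` later in this scope, then, only if some enclosing scope actually has cleanup to run, the unwind cache of the scope at the top of the stack. A self-contained restatement with simplified stand-in types (these are not rustc's real definitions):

```rust
// Stand-in types, simplified for illustration.
#[derive(Clone, Copy, Debug, PartialEq)]
struct BasicBlock(usize);

#[derive(Clone, Copy, Default)]
struct CachedBlock {
    unwind: Option<BasicBlock>,
    generator_drop: Option<BasicBlock>,
}

impl CachedBlock {
    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }
}

enum DropKind {
    Value { cached_block: CachedBlock },
    Storage,
}

struct Scope {
    needs_cleanup: bool,
    cached_unwind: CachedBlock,
}

// Mirrors the two-stage lookup in build_scope_drops above.
fn pick_on_diverge(
    remaining_drops: &[DropKind], // drops after the current one, innermost first
    earlier_scopes: &[Scope],     // enclosing scopes, outermost first
    generator_drop: bool,
) -> Option<BasicBlock> {
    remaining_drops.iter().filter_map(|dd| match dd {
        DropKind::Value { cached_block } => Some(*cached_block),
        DropKind::Storage => None,
    }).next().or_else(|| {
        if earlier_scopes.iter().any(|s| s.needs_cleanup) {
            // `last()` is the top of the scope stack, i.e. the nearest
            // enclosing scope; its chain runs the surrounding EndRegions.
            Some(earlier_scopes.last().unwrap().cached_unwind)
        } else {
            None // no cleanup anywhere: skip the landing pad entirely
        }
    }).map(|cb| cb.get(generator_drop).expect("cached block not present?"))
}
```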
@@ -933,14 +957,23 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         };
     }
 
-    // Finally, push the EndRegion block, used by mir-borrowck. (Block
-    // becomes trivial goto after pass that removes all EndRegions.)
-    {
-        let block = cfg.start_new_cleanup_block();
-        cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
-        cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
-        target = block
-    }
+    // Finally, push the EndRegion block, used by mir-borrowck, and set
+    // `cached_unwind` to point to it (Block becomes trivial goto after
+    // pass that removes all EndRegions).
+    target = {
+        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
+        if let Some(cached_block) = *cached_block {
+            cached_block
+        } else {
+            let block = cfg.start_new_cleanup_block();
+            cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
+            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
+            *cached_block = Some(block);
+            block
+        }
+    };
+
+    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
 
     target
 }
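The new tail of `build_diverge_scope` is a standard option-slot memoization: consult the mutable slot returned by `ref_mut`, and build-and-record the EndRegion block only on a miss. The generic shape, as a standalone sketch (the function name and usage here are illustrative, not rustc API):

```rust
// Generic cache-or-build step, mirroring the `cached_unwind` logic above.
fn cached_or_build<T: Copy>(slot: &mut Option<T>, build: impl FnOnce() -> T) -> T {
    if let Some(v) = *slot {
        v
    } else {
        let v = build();
        *slot = Some(v);
        v
    }
}

fn main() {
    let mut cache: Option<u32> = None;
    let a = cached_or_build(&mut cache, || 41 + 1);         // miss: builds
    let b = cached_or_build(&mut cache, || unreachable!()); // hit: reuses
    assert_eq!((a, b), (42, 42));
}
```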