Commit 87a8e8e

Auto merge of #45359 - arielb1:escaping-borrow, r=eddyb
Fix a few bugs in drop generation

This fixes a few bugs in drop generation, one of which causes spurious MIR borrowck errors.

Fixes #44832.

r? @eddyb
2 parents 95272a0 + 0caba17, commit 87a8e8e
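For context, here is a standalone sketch (mine, not part of the commit) of the shape of code this fix is about, adapted from the `mixed_drop_and_nondrop` test added below; `Noisy` is a hypothetical stand-in for the test suite's panic-injecting `Allocator`:

// Drop and non-drop locals initialized interleaved in one scope; this
// pattern exercised the drop-generation bugs fixed by this commit.
struct Noisy(u32);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping Noisy({})", self.0);
    }
}

fn mixed_drop_and_nondrop() {
    let (x, y, z);
    x = Noisy(1); // needs drop
    y = 5;        // no drop, between two values that need drops
    z = Noisy(2); // needs drop
    println!("{} {} {}", x.0, y, z.0);
}

fn main() {
    mixed_drop_and_nondrop();
}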

File tree

3 files changed: +127 −55 lines changed

src/librustc_mir/build/scope.rs

+86 −53
@@ -131,6 +131,9 @@ pub struct Scope<'tcx> {

     /// The cache for drop chain on "generator drop" exit.
     cached_generator_drop: Option<BasicBlock>,
+
+    /// The cache for drop chain on "unwind" exit.
+    cached_unwind: CachedBlock,
 }

 #[derive(Debug)]
@@ -154,6 +157,11 @@ struct CachedBlock {
     unwind: Option<BasicBlock>,

     /// The cached block for unwinds during cleanups-on-generator-drop path
+    ///
+    /// This is split from the standard unwind path here to prevent drop
+    /// elaboration from creating drop flags that would have to be captured
+    /// by the generator. I'm not sure how important this optimization is,
+    /// but it is here.
     generator_drop: Option<BasicBlock>,
 }

@@ -217,34 +225,29 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `unwind` controls whether caches for the unwind branch are also invalidated.
-    fn invalidate_cache(&mut self, unwind: bool) {
+    /// `storage_only` controls whether to invalidate only drop paths run `StorageDead`.
+    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
+    /// top-of-scope (as opposed to dependent scopes).
+    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
+        // with lots of `try!`?
+
+        // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();
-        if !unwind { return; }
-        for dropdata in &mut self.drops {
-            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
-                cached_block.invalidate();
-            }
+
+        if !storage_only {
+            // the current generator drop and unwind ignore
+            // storage but refer to top-of-scope
+            self.cached_generator_drop = None;
+            self.cached_unwind.invalidate();
         }
-    }

-    /// Returns the cached entrypoint for diverging exit from this scope.
-    ///
-    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
-    /// this method to work correctly.
-    fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
-        let mut drops = self.drops.iter().rev().filter_map(|data| {
-            match data.kind {
-                DropKind::Value { cached_block } => {
-                    Some(cached_block.get(generator_drop))
+        if !storage_only && !this_scope_only {
+            for dropdata in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+                    cached_block.invalidate();
                 }
-                DropKind::Storage => None
             }
-        });
-        if let Some(cached_block) = drops.next() {
-            Some(cached_block.expect("drop cache is not filled"))
-        } else {
-            None
         }
     }
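To make the two flags concrete, here is a standalone model of the invalidation rules above (my sketch, with each cache reduced to a validity flag):

// Standalone model of the new invalidate_cache logic.
#[derive(Debug, Default)]
struct MiniScope {
    cached_exits_valid: bool,          // exit drops: storage + top-of-scope
    cached_generator_drop_valid: bool, // no storage, top-of-scope only
    cached_unwind_valid: bool,         // no storage, top-of-scope only
    drop_caches_valid: bool,           // per-drop blocks, dependent scopes too
}

impl MiniScope {
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        // cached exits drop storage and refer to the top-of-scope
        self.cached_exits_valid = false;

        if !storage_only {
            // generator-drop and unwind exits ignore storage
            // but refer to the top-of-scope
            self.cached_generator_drop_valid = false;
            self.cached_unwind_valid = false;
        }

        if !storage_only && !this_scope_only {
            // per-drop cached blocks also depend on inner scopes
            self.drop_caches_valid = false;
        }
    }
}

fn main() {
    let mut scope = MiniScope {
        cached_exits_valid: true,
        cached_generator_drop_valid: true,
        cached_unwind_valid: true,
        drop_caches_valid: true,
    };
    // a storage-only drop at the top of this scope invalidates exits only
    scope.invalidate_cache(true, true);
    assert!(!scope.cached_exits_valid);
    assert!(scope.cached_generator_drop_valid && scope.cached_unwind_valid);
    assert!(scope.drop_caches_valid);
}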

@@ -356,7 +359,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             needs_cleanup: false,
             drops: vec![],
             cached_generator_drop: None,
-            cached_exits: FxHashMap()
+            cached_exits: FxHashMap(),
+            cached_unwind: CachedBlock::default(),
         });
     }

@@ -482,15 +486,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                                    TerminatorKind::Goto { target: b });
                 b
             };
+
+            // End all regions for scopes out of which we are breaking.
+            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
+
             unpack!(block = build_scope_drops(&mut self.cfg,
                                               scope,
                                               rest,
                                               block,
                                               self.arg_count,
                                               true));
-
-            // End all regions for scopes out of which we are breaking.
-            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
         }

         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
@@ -672,8 +677,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         // invalidating caches of each scope visited. This way bare minimum of the
         // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
         // cache of outer scpoe stays intact.
-        let invalidate_unwind = needs_drop && !this_scope;
-        scope.invalidate_cache(invalidate_unwind);
+        scope.invalidate_cache(!needs_drop, this_scope);
         if this_scope {
             if let DropKind::Value { .. } = drop_kind {
                 scope.needs_cleanup = true;
@@ -819,30 +823,50 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                            generator_drop: bool)
                            -> BlockAnd<()> {
     debug!("build_scope_drops({:?} -> {:?})", block, scope);
-    let mut iter = scope.drops.iter().rev().peekable();
+    let mut iter = scope.drops.iter().rev();
     while let Some(drop_data) = iter.next() {
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                // Try to find the next block with its cached block
-                // for us to diverge into in case the drop panics.
-                let on_diverge = iter.peek().iter().filter_map(|dd| {
+                // Try to find the next block with its cached block for us to
+                // diverge into, either a previous block in this current scope or
+                // the top of the previous scope.
+                //
+                // If it wasn't for EndRegion, we could just chain all the DropData
+                // together and pick the first DropKind::Value. Please do that
+                // when we replace EndRegion with NLL.
+                let on_diverge = iter.clone().filter_map(|dd| {
                     match dd.kind {
-                        DropKind::Value { cached_block } => {
-                            let result = cached_block.get(generator_drop);
-                            if result.is_none() {
-                                span_bug!(drop_data.span, "cached block not present?")
-                            }
-                            result
-                        },
+                        DropKind::Value { cached_block } => Some(cached_block),
                         DropKind::Storage => None
                     }
-                }).next();
-                // If there’s no `cached_block`s within current scope,
-                // we must look for one in the enclosing scope.
-                let on_diverge = on_diverge.or_else(|| {
-                    earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
+                }).next().or_else(|| {
+                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
+                        // If *any* scope requires cleanup code to be run,
+                        // we must use the cached unwind from the *topmost*
+                        // scope, to ensure all EndRegions from surrounding
+                        // scopes are executed before the drop code runs.
+                        Some(earlier_scopes.last().unwrap().cached_unwind)
+                    } else {
+                        // We don't need any further cleanup, so return None
+                        // to avoid creating a landing pad. We can skip
+                        // EndRegions because all local regions end anyway
+                        // when the function unwinds.
+                        //
+                        // This is an important optimization because LLVM is
+                        // terrible at optimizing landing pads. FIXME: I think
+                        // it would be cleaner and better to do this optimization
+                        // in SimplifyCfg instead of here.
+                        None
+                    }
+                });
+
+                let on_diverge = on_diverge.map(|cached_block| {
+                    cached_block.get(generator_drop).unwrap_or_else(|| {
+                        span_bug!(drop_data.span, "cached block not present?")
+                    })
                 });
+
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
                     location: drop_data.location.clone(),
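As a reading aid, the unwind-target selection above can be modeled in isolation roughly as follows (my sketch, not the commit's code; the scope and drop types are trimmed to the fields the logic touches):

// Trimmed model of the on_diverge selection in build_scope_drops.
#[derive(Debug, Clone, Copy, PartialEq)]
struct BasicBlock(usize);

enum DropKind {
    Value { cached_block: Option<BasicBlock> },
    Storage,
}

struct Scope {
    needs_cleanup: bool,
    cached_unwind: Option<BasicBlock>,
    drops: Vec<DropKind>, // in declaration order
}

// For the value drop at `rev_idx` (counting from the last drop), pick
// the block to diverge into if that drop panics.
fn on_diverge(scope: &Scope, rev_idx: usize, earlier_scopes: &[Scope]) -> Option<BasicBlock> {
    scope.drops.iter().rev().skip(rev_idx + 1)
        .filter_map(|dd| match dd {
            // prefer a later cached value drop in this same scope
            DropKind::Value { cached_block } => Some(*cached_block),
            DropKind::Storage => None,
        })
        .next()
        .or_else(|| {
            if earlier_scopes.iter().any(|s| s.needs_cleanup) {
                // topmost scope's cached unwind chain runs every EndRegion
                Some(earlier_scopes.last().unwrap().cached_unwind)
            } else {
                None // no cleanup needed: skip the landing pad entirely
            }
        })
        // by this point diverge_cleanup must have filled the caches
        .map(|cached| cached.expect("cached block not present?"))
}

fn main() {
    let outer = Scope {
        needs_cleanup: true,
        cached_unwind: Some(BasicBlock(1)),
        drops: vec![],
    };
    let inner = Scope {
        needs_cleanup: true,
        cached_unwind: None,
        drops: vec![DropKind::Value { cached_block: Some(BasicBlock(2)) },
                    DropKind::Storage,
                    DropKind::Value { cached_block: Some(BasicBlock(3)) }],
    };
    // the last drop diverges into the earlier cached value drop,
    // skipping the storage-only entry
    assert_eq!(on_diverge(&inner, 0, &[outer]), Some(BasicBlock(2)));
}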
@@ -933,14 +957,23 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         };
     }

-    // Finally, push the EndRegion block, used by mir-borrowck. (Block
-    // becomes trivial goto after pass that removes all EndRegions.)
-    {
-        let block = cfg.start_new_cleanup_block();
-        cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
-        cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
-        target = block
-    }
+    // Finally, push the EndRegion block, used by mir-borrowck, and set
+    // `cached_unwind` to point to it (Block becomes trivial goto after
+    // pass that removes all EndRegions).
+    target = {
+        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
+        if let Some(cached_block) = *cached_block {
+            cached_block
+        } else {
+            let block = cfg.start_new_cleanup_block();
+            cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
+            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
+            *cached_block = Some(block);
+            block
+        }
+    };
+
+    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);

     target
 }

src/test/compile-fail/borrowck/borrowck-unary-move.rs

+6 −1
@@ -8,10 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

+// ignore-tidy-linelength
+// revisions: ast mir
+//[mir]compile-flags: -Z emit-end-regions -Z borrowck-mir

 fn foo(x: Box<isize>) -> isize {
     let y = &*x;
-    free(x); //~ ERROR cannot move out of `x` because it is borrowed
+    free(x); //[ast]~ ERROR cannot move out of `x` because it is borrowed
+    //[mir]~^ ERROR cannot move out of `x` because it is borrowed (Ast)
+    //[mir]~| ERROR cannot move out of `x` because it is borrowed (Mir)
     *y
 }

src/test/run-pass/dynamic-drop.rs

+35 −1
@@ -8,9 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![feature(untagged_unions)]
+#![feature(generators, generator_trait, untagged_unions)]

 use std::cell::{Cell, RefCell};
+use std::ops::Generator;
 use std::panic;
 use std::usize;

@@ -161,6 +162,32 @@ fn vec_simple(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
 }

+fn generator(a: &Allocator, run_count: usize) {
+    assert!(run_count < 4);
+
+    let mut gen = || {
+        (a.alloc(),
+         yield a.alloc(),
+         a.alloc(),
+         yield a.alloc()
+        );
+    };
+    for _ in 0..run_count {
+        gen.resume();
+    }
+}
+
+fn mixed_drop_and_nondrop(a: &Allocator) {
+    // check that destructor panics handle drop
+    // and non-drop blocks in the same scope correctly.
+    //
+    // Surprisingly enough, this used to not work.
+    let (x, y, z);
+    x = a.alloc();
+    y = 5;
+    z = a.alloc();
+}
+
 #[allow(unreachable_code)]
 fn vec_unreachable(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
@@ -228,5 +255,12 @@ fn main() {
     run_test(|a| field_assignment(a, false));
     run_test(|a| field_assignment(a, true));

+    run_test(|a| generator(a, 0));
+    run_test(|a| generator(a, 1));
+    run_test(|a| generator(a, 2));
+    run_test(|a| generator(a, 3));
+
+    run_test(|a| mixed_drop_and_nondrop(a));
+
     run_test_nopanic(|a| union1(a));
 }
