Commit 64c1aef
Rollup merge of rust-lang#35751 - nagisa:mir-scope-fix-again, r=eddyb

Fix the invalidation of the MIR early exit cache

~~rust-lang#34307 introduced a cache for early exits in order to help with the O(n*m) explosion of cleanup blocks, but the cache is invalidated incorrectly and I can’t seem to figure out why (caching is hard!)~~ ~~Remove the cache for now to fix the immediate correctness issue and worry about the performance later.~~

Cache invalidation got fixed. Fixes rust-lang#35737

r? @nikomatsakis
2 parents: 1e13de8 + 2c3250a
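For context on the cache mentioned above: every early exit out of a stack of nested scopes needs a chain of cleanup blocks, one drop per live value, so m exits through n scopes with drops can cost on the order of n*m blocks unless the exits share cached chains. Below is a minimal, invented illustration of the shape of code involved (the function and names are ours, not from the commit):

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

// Three nested scopes, each holding a value that needs a drop, and several
// early exits. Every `return` must drop the values live at that point, so
// MIR building either reuses one cached chain of drop blocks per scope or,
// without the cache, emits a fresh chain for each exit.
fn early_exits(x: u32) -> u32 {
    let _a = Noisy("a");
    {
        let _b = Noisy("b");
        {
            let _c = Noisy("c");
            if x == 0 { return 0; } // exits through drops of c, b, a
            if x == 1 { return 1; } // same chain of drops as above
        }
        if x == 2 { return 2; }     // exits through drops of b, a
    }
    3
}

fn main() {
    assert_eq!(early_exits(1), 1);
}
```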

File tree: 2 files changed, +96, -20 lines
src/librustc_mir/build/scope.rs (59 additions & 20 deletions)
@@ -198,8 +198,11 @@ impl<'tcx> Scope<'tcx> {
     ///
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
-    fn invalidate_cache(&mut self) {
-        self.cached_exits = FnvHashMap();
+    ///
+    /// `unwind` controls whether caches for the unwind branch are also invalidated.
+    fn invalidate_cache(&mut self, unwind: bool) {
+        self.cached_exits.clear();
+        if !unwind { return; }
         for dropdata in &mut self.drops {
             if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                 *cached_block = None;
@@ -455,25 +458,65 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         };

         for scope in self.scopes.iter_mut().rev() {
-            if scope.extent == extent {
+            let this_scope = scope.extent == extent;
+            // When building drops, we try to cache chains of drops so that they can be reused
+            // by other drops which branch into the cached (already built) blocks. This,
+            // however, means that whenever we add a drop into a scope which already had some
+            // blocks built (and thus cached) for it, we must invalidate all the caches which
+            // might branch into that scope. This is necessary, because otherwise some other
+            // code might use a cache to branch into an already built chain of drops,
+            // essentially ignoring the newly added drop.
+            //
+            // For example, consider two scopes with a drop in each. These are built and the
+            // caches are filled:
+            //
+            // +----------------------------------------------------------+
+            // |  +---------------------------------+                     |
+            // |  |  +--------+     +-------------+ |  +---------------+  |
+            // |  |  | return | <-+ | drop(outer) |<+  | drop(middle)  |  |
+            // |  |  +--------+     +-------------+ |  +---------------+  |
+            // |  +------------|outer_scope cache|--+                     |
+            // +--------------------------------|middle_scope cache|------+
+            //
+            // Now a new, inner-most scope is added, along with a new drop into both the
+            // inner-most and the outer-most scope:
+            //
+            // +----------------------------------------------------------+
+            // |  +---------------------------------+                     |
+            // |  |  +--------+     +-------------+ |  +---------------+  |  +-------------+
+            // |  |  | return | <+  | drop(new)   |<+  | drop(middle)  | <--+| drop(inner) |
+            // |  |  +--------+  |  | drop(outer) | |  +---------------+  |  +-------------+
+            // |  |             +-+ +-------------+ |                     |
+            // |  +---|invalid outer_scope cache|---+                     |
+            // +-------------------|invalid middle_scope cache|-----------+
+            //
+            // If, when adding `drop(new)`, we did not invalidate the cached blocks for both
+            // outer_scope and middle_scope, then, when building drops for the inner
+            // (right-most) scope, the old cached blocks, which lack `drop(new)`, would get
+            // used, producing wrong results.
+            //
+            // The cache and its invalidation for the unwind branch are somewhat special. That
+            // cache is per-drop, rather than per-scope, which has several implications: adding
+            // a new drop into a scope will not invalidate the cached blocks of the prior drops
+            // in that scope, because none of the already existing drops has an edge into a
+            // block containing the newly added drop.
+            //
+            // Note that this code iterates the scopes from the inner-most to the outer-most,
+            // invalidating the caches of each scope visited. This way the bare minimum of
+            // caches gets invalidated; i.e. if a new drop is added into the middle scope, the
+            // cache of the outer scope stays intact.
+            let invalidate_unwind = needs_drop && !this_scope;
+            scope.invalidate_cache(invalidate_unwind);
+            if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
                 }
-
-                // No need to invalidate any caches here. The just-scheduled drop will branch into
-                // the drop that comes before it in the vector.
                 scope.drops.push(DropData {
                     span: span,
                     location: lvalue.clone(),
                     kind: drop_kind
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re
-                // looking for, because all of the blocks in the chain will become incorrect.
-                if let DropKind::Value { .. } = drop_kind {
-                    scope.invalidate_cache()
-                }
             }
         }
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
@@ -490,11 +533,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                              value: &Lvalue<'tcx>,
                              item_ty: Ty<'tcx>) {
         for scope in self.scopes.iter_mut().rev() {
+            // See the comment in schedule_drop above. The primary difference is that we
+            // invalidate the unwind blocks unconditionally. That’s because the box free may be
+            // considered the outer-most cleanup within the scope.
+            scope.invalidate_cache(true);
             if scope.extent == extent {
                 assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                // We also must invalidate the caches in the scope for which the free is scheduled
-                // because the drops must branch into the free we schedule here.
-                scope.invalidate_cache();
                 scope.needs_cleanup = true;
                 scope.free = Some(FreeData {
                     span: span,
@@ -503,11 +547,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     cached_block: None
                 });
                 return;
-            } else {
-                // We must invalidate all the cached_blocks leading up to the scope we’re looking
-                // for, because otherwise some/most of the blocks in the chain will become
-                // incorrect.
-                scope.invalidate_cache();
             }
         }
         span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
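To see the invalidation rule outside the diff, here is a stripped-down, self-contained model of the new logic. All the types here are simplified stand-ins invented for this sketch (the real `Scope`, `DropData`, `BasicBlock`, and `CodeExtent` live in librustc_mir and carry much more state); only the control flow mirrors the patch:

```rust
use std::collections::HashMap;

// Simplified stand-ins for the rustc-internal types used in scope.rs.
type BasicBlock = usize;
type CodeExtent = usize;

#[allow(dead_code)]
enum DropKind {
    // A value drop caches the unwind block built for it, per drop.
    Value { cached_block: Option<BasicBlock> },
    Storage,
}

struct DropData {
    kind: DropKind,
}

struct Scope {
    extent: CodeExtent,
    drops: Vec<DropData>,
    // Cached chains of early-exit drop blocks, keyed here by exit target.
    cached_exits: HashMap<CodeExtent, BasicBlock>,
}

impl Scope {
    // Mirrors the diff: the exit cache is always cleared, while the
    // per-drop unwind caches are cleared only when `unwind` is true.
    fn invalidate_cache(&mut self, unwind: bool) {
        self.cached_exits.clear();
        if !unwind { return; }
        for dropdata in &mut self.drops {
            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
                *cached_block = None;
            }
        }
    }
}

// Mirrors the scheduling loop: walk scopes from inner-most to outer-most,
// invalidating exit caches along the way. Only the scopes *inside* the
// target scope need their unwind caches cleared, because existing drops in
// the target scope never branch into the newly added one.
fn schedule_drop(scopes: &mut Vec<Scope>, extent: CodeExtent, needs_drop: bool) {
    for scope in scopes.iter_mut().rev() {
        let this_scope = scope.extent == extent;
        scope.invalidate_cache(needs_drop && !this_scope);
        if this_scope {
            scope.drops.push(DropData {
                kind: DropKind::Value { cached_block: None },
            });
            return;
        }
    }
    panic!("extent {} not in scope", extent);
}

fn main() {
    let mut scopes = vec![
        Scope { extent: 0, drops: vec![], cached_exits: HashMap::new() },
        Scope { extent: 1, drops: vec![], cached_exits: HashMap::new() },
    ];
    // Schedule a value drop in the outer scope (extent 0): the inner scope
    // (extent 1) has its unwind caches invalidated too, since its cached
    // chains may branch into the outer scope's drops.
    schedule_drop(&mut scopes, 0, true);
    assert_eq!(scopes[0].drops.len(), 1);
}
```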
New test file (37 additions & 0 deletions)
@@ -0,0 +1,37 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+static mut DROP: bool = false;
+
+struct ConnWrap(Conn);
+impl ::std::ops::Deref for ConnWrap {
+    type Target=Conn;
+    fn deref(&self) -> &Conn { &self.0 }
+}
+
+struct Conn;
+impl Drop for Conn {
+    fn drop(&mut self) { unsafe { DROP = true; } }
+}
+
+fn inner() {
+    let conn = &*match Some(ConnWrap(Conn)) {
+        Some(val) => val,
+        None => return,
+    };
+    return;
+}
+
+fn main() {
+    inner();
+    unsafe {
+        assert_eq!(DROP, true);
+    }
+}
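Why this test catches the bug (our reading of the fix, not text from the commit): the `match` in `inner` produces a temporary `ConnWrap` whose lifetime is extended by the `&*` borrow, so its drop is scheduled after the early-exit blocks for the `None => return` arm have already been built and cached. With the old invalidation logic, the trailing `return` could reuse the stale cached chain and skip `Conn::drop`, leaving `DROP` as `false`; with the fix, the cache is invalidated, the drop runs, and the assertion in `main` holds.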
