From 6690cfe25f1ea4a430cc08efbe4a4d4ef0b736b2 Mon Sep 17 00:00:00 2001 From: Josh Driver Date: Tue, 24 Jan 2017 01:31:49 +1030 Subject: [PATCH 01/18] Make builtin derives a SyntaxExtension This allows builtin derives to be registered and resolved, just like other derive types. --- src/libsyntax/ext/base.rs | 8 ++++- src/libsyntax/ext/expand.rs | 4 +-- src/libsyntax_ext/deriving/decodable.rs | 1 + src/libsyntax_ext/deriving/encodable.rs | 1 + src/libsyntax_ext/deriving/mod.rs | 42 ++++++++++++------------- src/libsyntax_ext/lib.rs | 2 ++ 6 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index edf74e1fe19f1..231e2e6205cf8 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -10,7 +10,7 @@ pub use self::SyntaxExtension::{MultiDecorator, MultiModifier, NormalTT, IdentTT}; -use ast::{self, Attribute, Name, PatKind}; +use ast::{self, Attribute, Name, PatKind, MetaItem}; use attr::HasAttrs; use codemap::{self, CodeMap, ExpnInfo, Spanned, respan}; use syntax_pos::{Span, ExpnId, NO_EXPANSION}; @@ -471,6 +471,9 @@ impl MacResult for DummyResult { } } +pub type BuiltinDeriveFn = + for<'cx> fn(&'cx mut ExtCtxt, Span, &MetaItem, &Annotatable, &mut FnMut(Annotatable)); + /// An enum representing the different kinds of syntax extensions. pub enum SyntaxExtension { /// A syntax extension that is attached to an item and creates new items @@ -508,6 +511,9 @@ pub enum SyntaxExtension { IdentTT(Box, Option, bool), CustomDerive(Box), + + /// An attribute-like procedural macro that derives a builtin trait. + BuiltinDerive(BuiltinDeriveFn), } pub type NamedSyntaxExtension = (Name, SyntaxExtension); diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 226625ebc8e5e..0e5d94e03810f 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -370,7 +370,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks); self.parse_expansion(tok_result, kind, name, attr.span) } - SyntaxExtension::CustomDerive(_) => { + SyntaxExtension::CustomDerive(..) | SyntaxExtension::BuiltinDerive(..) => { self.cx.span_err(attr.span, &format!("`{}` is a derive mode", name)); kind.dummy(attr.span) } @@ -440,7 +440,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { return kind.dummy(span); } - SyntaxExtension::CustomDerive(..) => { + SyntaxExtension::CustomDerive(..) | SyntaxExtension::BuiltinDerive(..) 
=> { self.cx.span_err(path.span, &format!("`{}` is a derive mode", extname)); return kind.dummy(span); } diff --git a/src/libsyntax_ext/deriving/decodable.rs b/src/libsyntax_ext/deriving/decodable.rs index e2634c60dcaad..6359d642d157c 100644 --- a/src/libsyntax_ext/deriving/decodable.rs +++ b/src/libsyntax_ext/deriving/decodable.rs @@ -35,6 +35,7 @@ pub fn expand_deriving_decodable(cx: &mut ExtCtxt, mitem: &MetaItem, item: &Annotatable, push: &mut FnMut(Annotatable)) { + deriving::warn_if_deprecated(cx, span, "Decodable"); expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize") } diff --git a/src/libsyntax_ext/deriving/encodable.rs b/src/libsyntax_ext/deriving/encodable.rs index 092738ab8a03d..a276193e81b97 100644 --- a/src/libsyntax_ext/deriving/encodable.rs +++ b/src/libsyntax_ext/deriving/encodable.rs @@ -112,6 +112,7 @@ pub fn expand_deriving_encodable(cx: &mut ExtCtxt, mitem: &MetaItem, item: &Annotatable, push: &mut FnMut(Annotatable)) { + deriving::warn_if_deprecated(cx, span, "Encodable"); expand_deriving_encodable_imp(cx, span, mitem, item, push, "serialize") } diff --git a/src/libsyntax_ext/deriving/mod.rs b/src/libsyntax_ext/deriving/mod.rs index 096f6dfd5d8d8..30d0da588a5df 100644 --- a/src/libsyntax_ext/deriving/mod.rs +++ b/src/libsyntax_ext/deriving/mod.rs @@ -10,10 +10,11 @@ //! The compiler code necessary to implement the `#[derive]` extensions. +use std::rc::Rc; use syntax::ast::{self, MetaItem}; use syntax::attr::HasAttrs; use syntax::codemap; -use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension}; +use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension, Resolver}; use syntax::ext::build::AstBuilder; use syntax::feature_gate; use syntax::ptr::P; @@ -292,7 +293,10 @@ pub fn expand_derive(cx: &mut ExtCtxt, for titem in traits.iter() { let tname = titem.word().unwrap().name(); let name = Symbol::intern(&format!("derive({})", tname)); + let tname_cx = ast::Ident::with_empty_ctxt(titem.name().unwrap()); let mitem = cx.meta_word(titem.span, name); + let path = ast::Path::from_ident(titem.span, tname_cx); + let ext = cx.resolver.resolve_macro(cx.current_expansion.mark, &path, false).unwrap(); let span = Span { expn_id: cx.codemap().record_expansion(codemap::ExpnInfo { @@ -306,11 +310,15 @@ pub fn expand_derive(cx: &mut ExtCtxt, ..titem.span }; - let my_item = Annotatable::Item(item); - expand_builtin(&tname.as_str(), cx, span, &mitem, &my_item, &mut |a| { - items.push(a); - }); - item = my_item.expect_item(); + if let SyntaxExtension::BuiltinDerive(ref func) = *ext { + let my_item = Annotatable::Item(item); + func(cx, span, &mitem, &my_item, &mut |a| { + items.push(a) + }); + item = my_item.expect_item(); + } else { + unreachable!(); + } } items.insert(0, Annotatable::Item(item)); @@ -326,21 +334,13 @@ macro_rules! 
derive_traits { } } - fn expand_builtin(name: &str, - ecx: &mut ExtCtxt, - span: Span, - mitem: &MetaItem, - item: &Annotatable, - push: &mut FnMut(Annotatable)) { - match name { - $( - $name => { - warn_if_deprecated(ecx, span, $name); - $func(ecx, span, mitem, item, push); - } - )* - _ => panic!("not a builtin derive mode: {}", name), - } + pub fn register_builtin_derives(resolver: &mut Resolver) { + $( + resolver.add_ext( + ast::Ident::with_empty_ctxt(Symbol::intern($name)), + Rc::new(SyntaxExtension::BuiltinDerive($func)) + ); + )* } } } diff --git a/src/libsyntax_ext/lib.rs b/src/libsyntax_ext/lib.rs index ebec23d0901a0..e872cfaeacb7b 100644 --- a/src/libsyntax_ext/lib.rs +++ b/src/libsyntax_ext/lib.rs @@ -57,6 +57,8 @@ use syntax::symbol::Symbol; pub fn register_builtins(resolver: &mut syntax::ext::base::Resolver, user_exts: Vec, enable_quotes: bool) { + deriving::register_builtin_derives(resolver); + let mut register = |name, ext| { resolver.add_ext(ast::Ident::with_empty_ctxt(name), Rc::new(ext)); }; From bcf859c589c10e6c10c2d39fa18846b98a1743dc Mon Sep 17 00:00:00 2001 From: Josh Driver Date: Tue, 24 Jan 2017 08:55:08 +1030 Subject: [PATCH 02/18] Rename CustomDerive to ProcMacroDerive for macros 1.1 --- src/librustc_metadata/creader.rs | 6 +++--- src/librustc_resolve/build_reduced_graph.rs | 2 +- src/libsyntax/ext/base.rs | 6 +++++- src/libsyntax/ext/expand.rs | 4 ++-- src/libsyntax_ext/deriving/custom.rs | 18 +++++++++--------- src/libsyntax_ext/deriving/mod.rs | 4 ++-- src/libsyntax_ext/proc_macro_registrar.rs | 8 ++++---- .../proc-macro/derive-bad.rs | 2 +- .../proc-macro/load-panic.rs | 2 +- .../proc-macro/no-macro-use-attr.rs | 2 +- src/test/compile-fail/no-link.rs | 2 +- src/test/ui/custom-derive/issue-36935.stderr | 2 +- 12 files changed, 31 insertions(+), 27 deletions(-) diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 161331b1728bc..8cb123b54f167 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -577,7 +577,7 @@ impl<'a> CrateLoader<'a> { use proc_macro::TokenStream; use proc_macro::__internal::Registry; use rustc_back::dynamic_lib::DynamicLibrary; - use syntax_ext::deriving::custom::CustomDerive; + use syntax_ext::deriving::custom::ProcMacroDerive; use syntax_ext::proc_macro_impl::AttrProcMacro; let path = match dylib { @@ -609,8 +609,8 @@ impl<'a> CrateLoader<'a> { expand: fn(TokenStream) -> TokenStream, attributes: &[&'static str]) { let attrs = attributes.iter().cloned().map(Symbol::intern).collect(); - let derive = SyntaxExtension::CustomDerive( - Box::new(CustomDerive::new(expand, attrs)) + let derive = SyntaxExtension::ProcMacroDerive( + Box::new(ProcMacroDerive::new(expand, attrs)) ); self.0.push((Symbol::intern(trait_name), Rc::new(derive))); } diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index f74af416cde09..95e3c96a2522e 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -546,7 +546,7 @@ impl<'a> Resolver<'a> { "an `extern crate` loading macros must be at the crate root"); } else if !self.use_extern_macros && !used && self.session.cstore.dep_kind(module.def_id().unwrap().krate).macros_only() { - let msg = "custom derive crates and `#[no_link]` crates have no effect without \ + let msg = "proc macro crates and `#[no_link]` crates have no effect without \ `#[macro_use]`"; self.session.span_warn(item.span, msg); used = true; // Avoid the normal unused extern crate warning 
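The registration mechanism these two patches build on is easiest to see in miniature. Below is a hedged, self-contained Rust sketch (simplified stand-ins invented for this example; the `Resolver`, `SyntaxExtension`, and `BuiltinDeriveFn` here are toy versions, not rustc's real types or signatures) of how builtin derives end up in the same name-to-extension table that name resolution consults:

```rust
use std::collections::HashMap;
use std::rc::Rc;

// Stand-in expander signature: rustc's BuiltinDeriveFn instead takes
// (&mut ExtCtxt, Span, &MetaItem, &Annotatable, &mut FnMut(Annotatable)).
type BuiltinDeriveFn = fn(&str) -> String;

// Simplified extension kind: the real enum also carries NormalTT, IdentTT,
// MultiModifier, MultiDecorator and the proc-macro derive variant.
enum SyntaxExtension {
    BuiltinDerive(BuiltinDeriveFn),
}

// Stand-in for the resolver's name -> extension table.
struct Resolver {
    extensions: HashMap<String, Rc<SyntaxExtension>>,
}

impl Resolver {
    fn add_ext(&mut self, name: &str, ext: Rc<SyntaxExtension>) {
        self.extensions.insert(name.to_string(), ext);
    }

    fn resolve_macro(&self, name: &str) -> Option<Rc<SyntaxExtension>> {
        self.extensions.get(name).cloned()
    }
}

// A toy builtin derive: emit an impl for the named item.
fn derive_clone(item: &str) -> String {
    format!("impl Clone for {} {{ /* ... */ }}", item)
}

fn main() {
    // register_builtin_derives: every builtin derive goes through add_ext,
    // exactly like user-defined macros, so registration is uniform.
    let mut resolver = Resolver { extensions: HashMap::new() };
    resolver.add_ext("Clone", Rc::new(SyntaxExtension::BuiltinDerive(derive_clone)));

    // expand_derive: resolve the derive by name, then dispatch on the variant.
    match resolver.resolve_macro("Clone") {
        Some(ext) => match *ext {
            // fn pointers are Copy, so we can bind `func` out of the Rc.
            SyntaxExtension::BuiltinDerive(func) => println!("{}", func("Point")),
        },
        None => println!("unknown derive"),
    }
}
```

The point of the patch is exactly this uniformity: `expand_derive` no longer needs a hard-coded list of builtin derive names, it resolves the derive path like any other macro and dispatches on the extension kind it gets back.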
diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index 231e2e6205cf8..17b0b97468df8 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -510,7 +510,11 @@ pub enum SyntaxExtension { /// IdentTT(Box, Option, bool), - CustomDerive(Box), + /// An attribute-like procedural macro. TokenStream -> TokenStream. + /// The input is the annotated item. + /// Allows generating code to implement a Trait for a given struct + /// or enum item. + ProcMacroDerive(Box), /// An attribute-like procedural macro that derives a builtin trait. BuiltinDerive(BuiltinDeriveFn), diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 0e5d94e03810f..01a8c215d47aa 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -370,7 +370,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { let tok_result = mac.expand(self.cx, attr.span, attr_toks, item_toks); self.parse_expansion(tok_result, kind, name, attr.span) } - SyntaxExtension::CustomDerive(..) | SyntaxExtension::BuiltinDerive(..) => { + SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => { self.cx.span_err(attr.span, &format!("`{}` is a derive mode", name)); kind.dummy(attr.span) } @@ -440,7 +440,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { return kind.dummy(span); } - SyntaxExtension::CustomDerive(..) | SyntaxExtension::BuiltinDerive(..) => { + SyntaxExtension::ProcMacroDerive(..) | SyntaxExtension::BuiltinDerive(..) => { self.cx.span_err(path.span, &format!("`{}` is a derive mode", extname)); return kind.dummy(span); } diff --git a/src/libsyntax_ext/deriving/custom.rs b/src/libsyntax_ext/deriving/custom.rs index 2ce6fc03f7731..e118ef1ea01f4 100644 --- a/src/libsyntax_ext/deriving/custom.rs +++ b/src/libsyntax_ext/deriving/custom.rs @@ -32,18 +32,18 @@ impl<'a> Visitor<'a> for MarkAttrs<'a> { fn visit_mac(&mut self, _mac: &Mac) {} } -pub struct CustomDerive { +pub struct ProcMacroDerive { inner: fn(TokenStream) -> TokenStream, attrs: Vec, } -impl CustomDerive { - pub fn new(inner: fn(TokenStream) -> TokenStream, attrs: Vec) -> CustomDerive { - CustomDerive { inner: inner, attrs: attrs } +impl ProcMacroDerive { + pub fn new(inner: fn(TokenStream) -> TokenStream, attrs: Vec) -> ProcMacroDerive { + ProcMacroDerive { inner: inner, attrs: attrs } } } -impl MultiItemModifier for CustomDerive { +impl MultiItemModifier for ProcMacroDerive { fn expand(&self, ecx: &mut ExtCtxt, span: Span, @@ -54,7 +54,7 @@ impl MultiItemModifier for CustomDerive { Annotatable::Item(item) => item, Annotatable::ImplItem(_) | Annotatable::TraitItem(_) => { - ecx.span_err(span, "custom derive attributes may only be \ + ecx.span_err(span, "proc_macro_derive attributes may only be \ applied to struct/enum items"); return Vec::new() } @@ -63,7 +63,7 @@ impl MultiItemModifier for CustomDerive { ItemKind::Struct(..) | ItemKind::Enum(..) 
=> {}, _ => { - ecx.span_err(span, "custom derive attributes may only be \ + ecx.span_err(span, "proc_macro_derive attributes may only be \ applied to struct/enum items"); return Vec::new() } @@ -81,7 +81,7 @@ impl MultiItemModifier for CustomDerive { let stream = match res { Ok(stream) => stream, Err(e) => { - let msg = "custom derive attribute panicked"; + let msg = "proc_macro_derive attribute panicked"; let mut err = ecx.struct_span_fatal(span, msg); if let Some(s) = e.downcast_ref::() { err.help(&format!("message: {}", s)); @@ -100,7 +100,7 @@ impl MultiItemModifier for CustomDerive { Ok(new_items) => new_items, Err(_) => { // FIXME: handle this better - let msg = "custom derive produced unparseable tokens"; + let msg = "proc_macro_derive produced unparseable tokens"; ecx.struct_span_fatal(span, msg).emit(); panic!(FatalError); } diff --git a/src/libsyntax_ext/deriving/mod.rs b/src/libsyntax_ext/deriving/mod.rs index 30d0da588a5df..311b8ae41f8b9 100644 --- a/src/libsyntax_ext/deriving/mod.rs +++ b/src/libsyntax_ext/deriving/mod.rs @@ -163,7 +163,7 @@ pub fn expand_derive(cx: &mut ExtCtxt, if is_builtin_trait(tname) || { let derive_mode = ast::Path::from_ident(titem.span, ast::Ident::with_empty_ctxt(tname)); cx.resolver.resolve_macro(cx.current_expansion.mark, &derive_mode, false).map(|ext| { - if let SyntaxExtension::CustomDerive(_) = *ext { true } else { false } + if let SyntaxExtension::ProcMacroDerive(_) = *ext { true } else { false } }).unwrap_or(false) } { return true; @@ -249,7 +249,7 @@ pub fn expand_derive(cx: &mut ExtCtxt, ..mitem.span }; - if let SyntaxExtension::CustomDerive(ref ext) = *ext { + if let SyntaxExtension::ProcMacroDerive(ref ext) = *ext { return ext.expand(cx, span, &mitem, item); } else { unreachable!() diff --git a/src/libsyntax_ext/proc_macro_registrar.rs b/src/libsyntax_ext/proc_macro_registrar.rs index c8af16e9242f0..325f09a83ddab 100644 --- a/src/libsyntax_ext/proc_macro_registrar.rs +++ b/src/libsyntax_ext/proc_macro_registrar.rs @@ -27,7 +27,7 @@ use syntax_pos::{Span, DUMMY_SP}; use deriving; -struct CustomDerive { +struct ProcMacroDerive { trait_name: ast::Name, function_name: Ident, span: Span, @@ -40,7 +40,7 @@ struct AttrProcMacro { } struct CollectProcMacros<'a> { - derives: Vec, + derives: Vec, attr_macros: Vec, in_root: bool, handler: &'a errors::Handler, @@ -176,7 +176,7 @@ impl<'a> CollectProcMacros<'a> { }; if self.in_root && item.vis == ast::Visibility::Public { - self.derives.push(CustomDerive { + self.derives.push(ProcMacroDerive { span: item.span, trait_name: trait_name, function_name: item.ident, @@ -319,7 +319,7 @@ impl<'a> Visitor<'a> for CollectProcMacros<'a> { // } // } fn mk_registrar(cx: &mut ExtCtxt, - custom_derives: &[CustomDerive], + custom_derives: &[ProcMacroDerive], custom_attrs: &[AttrProcMacro]) -> P { let eid = cx.codemap().record_expansion(ExpnInfo { call_site: DUMMY_SP, diff --git a/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs b/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs index a5359946c09c2..bc4da9fee47ee 100644 --- a/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs +++ b/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs @@ -16,7 +16,7 @@ extern crate derive_bad; #[derive( A )] -//~^^ ERROR: custom derive produced unparseable tokens +//~^^ ERROR: proc_macro_derive produced unparseable tokens struct A; fn main() {} diff --git a/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs b/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs index f9906b650fb87..107273d012dd6 100644 --- 
a/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs
+++ b/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs
@@ -14,7 +14,7 @@ extern crate derive_panic;
#[derive(A)]
-//~^ ERROR: custom derive attribute panicked
+//~^ ERROR: proc_macro_derive attribute panicked
//~| HELP: message: nope!
struct Foo;
diff --git a/src/test/compile-fail-fulldeps/proc-macro/no-macro-use-attr.rs b/src/test/compile-fail-fulldeps/proc-macro/no-macro-use-attr.rs
index f61b8b4073b6f..e47a4aefb5e0b 100644
--- a/src/test/compile-fail-fulldeps/proc-macro/no-macro-use-attr.rs
+++ b/src/test/compile-fail-fulldeps/proc-macro/no-macro-use-attr.rs
@@ -13,7 +13,7 @@ #![feature(rustc_attrs)]
extern crate derive_a;
-//~^ WARN custom derive crates and `#[no_link]` crates have no effect without `#[macro_use]`
+//~^ WARN proc macro crates and `#[no_link]` crates have no effect without `#[macro_use]`
#[rustc_error]
fn main() {} //~ ERROR compilation successful
diff --git a/src/test/compile-fail/no-link.rs b/src/test/compile-fail/no-link.rs
index d8e7411bded2c..f74ff55e2c08e 100644
--- a/src/test/compile-fail/no-link.rs
+++ b/src/test/compile-fail/no-link.rs
@@ -12,7 +12,7 @@ #[no_link] extern crate empty_struct;
-//~^ WARN custom derive crates and `#[no_link]` crates have no effect without `#[macro_use]`
+//~^ WARN proc macro crates and `#[no_link]` crates have no effect without `#[macro_use]`
fn main() {
empty_struct::XEmpty1; //~ ERROR cannot find value `XEmpty1` in module `empty_struct`
diff --git a/src/test/ui/custom-derive/issue-36935.stderr b/src/test/ui/custom-derive/issue-36935.stderr
index 213366a307d40..ad1382cbc8e4b 100644
--- a/src/test/ui/custom-derive/issue-36935.stderr
+++ b/src/test/ui/custom-derive/issue-36935.stderr
@@ -1,4 +1,4 @@
-error: custom derive attribute panicked
+error: proc_macro_derive attribute panicked
--> $DIR/issue-36935.rs:17:15
|
17 | #[derive(Foo, Bar)]

From d848f1d7821a22ec2462e08401f966074041c324 Mon Sep 17 00:00:00 2001
From: Niko Matsakis
Date: Tue, 31 Jan 2017 04:35:17 -0500
Subject: [PATCH 03/18] rewrite the predecessors code to create a reduced graph

The old code created a flat listing of "HIR -> WorkProduct" edges. While perfectly general, this could lead to a lot of repetition if the same HIR nodes affect many work-products. This is set to be a problem when we start to skip typeck, since we will be adding a lot more "work-product"-like nodes.

The newer code uses an alternative strategy: it "reduces" the graph instead. Basically we walk the dep-graph and convert it to a DAG, where we only keep intermediate nodes if they are used by multiple work-products. This DAG does not contain the same set of nodes as the original graph, but it is guaranteed that (a) every output node is included in the graph and (b) the set of input nodes that can reach each output node is unchanged.

(Input nodes are basically HIR nodes and foreign metadata; output nodes are nodes that have associated state which we will persist to disk in some way. These are assumed to be disjoint sets.)
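The invariant in (b) can be made concrete. The sketch below is illustrative toy code (the string-keyed graph encoding and the helper `inputs_reaching_outputs` are invented for this example and are not rustc APIs); it checks that a candidate reduction preserves, for every output, the set of inputs that can reach it:

```rust
use std::collections::{HashMap, HashSet};

// For each output node, which input nodes can reach it? This is the
// property (b) that any valid reduction must preserve; the nodes and
// edges in between are free to change.
fn inputs_reaching_outputs<'a>(
    edges: &[(&'a str, &'a str)],
    inputs: &[&'a str],
    outputs: &[&'a str],
) -> HashMap<&'a str, HashSet<&'a str>> {
    let mut succ: HashMap<&str, Vec<&str>> = HashMap::new();
    for &(src, dst) in edges {
        succ.entry(src).or_insert_with(Vec::new).push(dst);
    }
    let mut result = HashMap::new();
    for &input in inputs {
        // DFS from each input, recording every output it reaches.
        let mut stack = vec![input];
        let mut seen = HashSet::new();
        while let Some(n) = stack.pop() {
            if !seen.insert(n) {
                continue;
            }
            if outputs.contains(&n) {
                result.entry(n).or_insert_with(HashSet::new).insert(input);
            }
            if let Some(next) = succ.get(n) {
                stack.extend(next.iter().cloned());
            }
        }
    }
    result
}

fn main() {
    // HIR0, HIR1 are inputs; WP0, WP1 are work-products (outputs);
    // Y is an intermediate node (edges run input -> ... -> output).
    let edges = [("HIR0", "Y"), ("HIR1", "Y"), ("Y", "WP0"), ("Y", "WP1")];
    let full = inputs_reaching_outputs(&edges, &["HIR0", "HIR1"], &["WP0", "WP1"]);

    // A reduction may drop the intermediate Y only if these sets survive:
    let reduced_edges = [("HIR0", "WP0"), ("HIR1", "WP0"),
                         ("HIR0", "WP1"), ("HIR1", "WP1")];
    let reduced = inputs_reaching_outputs(&reduced_edges, &["HIR0", "HIR1"], &["WP0", "WP1"]);
    assert_eq!(full, reduced);
}
```

Whether an intermediate node like `Y` is kept is purely a size tradeoff (the README added below works a 6-edges-versus-8-edges example); the reachability sets are the part that must not change.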
--- src/librustc_incremental/lib.rs | 3 + .../persist/dirty_clean.rs | 6 +- src/librustc_incremental/persist/load.rs | 155 ++++---- src/librustc_incremental/persist/preds.rs | 366 ------------------ .../persist/preds/compress/README.md | 48 +++ .../persist/preds/compress/classify/mod.rs | 139 +++++++ .../persist/preds/compress/classify/test.rs | 79 ++++ .../persist/preds/compress/construct.rs | 201 ++++++++++ .../persist/preds/compress/dag_id.rs | 33 ++ .../persist/preds/compress/mod.rs | 125 ++++++ .../persist/preds/compress/test.rs | 243 ++++++++++++ .../persist/preds/compress/test_macro.rs | 40 ++ src/librustc_incremental/persist/preds/mod.rs | 75 ++++ src/librustc_incremental/persist/save.rs | 49 ++- 14 files changed, 1095 insertions(+), 467 deletions(-) delete mode 100644 src/librustc_incremental/persist/preds.rs create mode 100644 src/librustc_incremental/persist/preds/compress/README.md create mode 100644 src/librustc_incremental/persist/preds/compress/classify/mod.rs create mode 100644 src/librustc_incremental/persist/preds/compress/classify/test.rs create mode 100644 src/librustc_incremental/persist/preds/compress/construct.rs create mode 100644 src/librustc_incremental/persist/preds/compress/dag_id.rs create mode 100644 src/librustc_incremental/persist/preds/compress/mod.rs create mode 100644 src/librustc_incremental/persist/preds/compress/test.rs create mode 100644 src/librustc_incremental/persist/preds/compress/test_macro.rs create mode 100644 src/librustc_incremental/persist/preds/mod.rs diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs index a866a15c4d280..e3c339829f6a4 100644 --- a/src/librustc_incremental/lib.rs +++ b/src/librustc_incremental/lib.rs @@ -23,6 +23,9 @@ #![feature(staged_api)] #![feature(rand)] #![feature(core_intrinsics)] +#![feature(conservative_impl_trait)] +#![feature(field_init_shorthand)] +#![feature(pub_restricted)] extern crate graphviz; #[macro_use] extern crate rustc; diff --git a/src/librustc_incremental/persist/dirty_clean.rs b/src/librustc_incremental/persist/dirty_clean.rs index 6e66dac6470f0..798bc6e9f9856 100644 --- a/src/librustc_incremental/persist/dirty_clean.rs +++ b/src/librustc_incremental/persist/dirty_clean.rs @@ -67,9 +67,9 @@ pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let _ignore = tcx.dep_graph.in_ignore(); let dirty_inputs: FxHashSet> = - dirty_inputs.iter() - .filter_map(|d| retraced.map(d)) - .collect(); + dirty_inputs.keys() + .filter_map(|d| retraced.map(d)) + .collect(); let query = tcx.dep_graph.query(); debug!("query-nodes: {:?}", query.nodes()); let krate = tcx.hir.krate(); diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs index 433110a2a6de0..e9a59fd762972 100644 --- a/src/librustc_incremental/persist/load.rs +++ b/src/librustc_incremental/persist/load.rs @@ -10,7 +10,7 @@ //! Code to save/load the dep-graph from files. -use rustc::dep_graph::DepNode; +use rustc::dep_graph::{DepNode, WorkProductId}; use rustc::hir::def_id::DefId; use rustc::hir::svh::Svh; use rustc::session::Session; @@ -19,6 +19,7 @@ use rustc_data_structures::fx::{FxHashSet, FxHashMap}; use rustc_serialize::Decodable as RustcDecodable; use rustc_serialize::opaque::Decoder; use std::path::{Path}; +use std::sync::Arc; use IncrementalHashesMap; use ich::Fingerprint; @@ -30,7 +31,9 @@ use super::fs::*; use super::file_format; use super::work_product; -pub type DirtyNodes = FxHashSet>; +// The key is a dirty node. 
The value is **some** base-input that we +// can blame it on. +pub type DirtyNodes = FxHashMap, DepNode>; /// If we are in incremental mode, and a previous dep-graph exists, /// then load up those nodes/edges that are still valid into the @@ -152,83 +155,65 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Retrace the paths in the directory to find their current location (if any). let retraced = directory.retrace(tcx); - // Compute the set of Hir nodes whose data has changed or which - // have been removed. These are "raw" source nodes, which means - // that they still use the original `DefPathIndex` values from the - // encoding, rather than having been retraced to a `DefId`. The - // reason for this is that this way we can include nodes that have - // been removed (which no longer have a `DefId` in the current - // compilation). - let dirty_raw_source_nodes = dirty_nodes(tcx, - incremental_hashes_map, - &serialized_dep_graph.hashes, - &retraced); - - // Create a list of (raw-source-node -> - // retracted-target-node) edges. In the process of retracing the - // target nodes, we may discover some of them def-paths no longer exist, - // in which case there is no need to mark the corresopnding nodes as dirty - // (they are just not present). So this list may be smaller than the original. - // - // Note though that in the common case the target nodes are - // `DepNode::WorkProduct` instances, and those don't have a - // def-id, so they will never be considered to not exist. Instead, - // we do a secondary hashing step (later, in trans) when we know - // the set of symbols that go into a work-product: if any symbols - // have been removed (or added) the hash will be different and - // we'll ignore the work-product then. - let retraced_edges: Vec<_> = - serialized_dep_graph.edges.iter() - .filter_map(|&(ref raw_source_node, ref raw_target_node)| { - retraced.map(raw_target_node) - .map(|target_node| (raw_source_node, target_node)) - }) - .collect(); - - // Compute which work-products have an input that has changed or - // been removed. Put the dirty ones into a set. - let mut dirty_target_nodes = FxHashSet(); - for &(raw_source_node, ref target_node) in &retraced_edges { - if dirty_raw_source_nodes.contains(raw_source_node) { - if !dirty_target_nodes.contains(target_node) { - dirty_target_nodes.insert(target_node.clone()); - + // Compute the set of nodes from the old graph where some input + // has changed or been removed. These are "raw" source nodes, + // which means that they still use the original `DefPathIndex` + // values from the encoding, rather than having been retraced to a + // `DefId`. The reason for this is that this way we can include + // nodes that have been removed (which no longer have a `DefId` in + // the current compilation). + let dirty_raw_nodes = initial_dirty_nodes(tcx, + incremental_hashes_map, + &serialized_dep_graph.hashes, + &retraced); + let dirty_raw_nodes = transitive_dirty_nodes(&serialized_dep_graph.edges, dirty_raw_nodes); + + // Recreate the edges in the graph that are still clean. + let mut clean_work_products = FxHashSet(); + let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output + for edge in &serialized_dep_graph.edges { + // If the target is dirty, skip the edge. If this is an edge + // that targets a work-product, we can print the blame + // information now. 
+ if let Some(blame) = dirty_raw_nodes.get(&edge.1) { + if let DepNode::WorkProduct(ref wp) = edge.1 { if tcx.sess.opts.debugging_opts.incremental_info { - // It'd be nice to pretty-print these paths better than just - // using the `Debug` impls, but wev. - println!("incremental: module {:?} is dirty because {:?} \ - changed or was removed", - target_node, - raw_source_node.map_def(|&index| { - Some(directory.def_path_string(tcx, index)) - }).unwrap()); + if dirty_work_products.insert(wp.clone()) { + // It'd be nice to pretty-print these paths better than just + // using the `Debug` impls, but wev. + println!("incremental: module {:?} is dirty because {:?} \ + changed or was removed", + wp, + blame.map_def(|&index| { + Some(directory.def_path_string(tcx, index)) + }).unwrap()); + } } } - } - } - - // For work-products that are still clean, add their deps into the - // graph. This is needed because later we will have to save this - // back out again! - let dep_graph = tcx.dep_graph.clone(); - for (raw_source_node, target_node) in retraced_edges { - if dirty_target_nodes.contains(&target_node) { continue; } - let source_node = retraced.map(raw_source_node).unwrap(); - - debug!("decode_dep_graph: clean edge: {:?} -> {:?}", source_node, target_node); - - let _task = dep_graph.in_task(target_node); - dep_graph.read(source_node); + // If the source is dirty, the target will be dirty. + assert!(!dirty_raw_nodes.contains_key(&edge.0)); + + // Retrace the source -> target edges to def-ids and then + // create an edge in the graph. Retracing may yield none if + // some of the data happens to have been removed; this ought + // to be impossible unless it is dirty, so we can unwrap. + let source_node = retraced.map(&edge.0).unwrap(); + let target_node = retraced.map(&edge.1).unwrap(); + let _task = tcx.dep_graph.in_task(target_node); + tcx.dep_graph.read(source_node); + if let DepNode::WorkProduct(ref wp) = edge.1 { + clean_work_products.insert(wp.clone()); + } } // Add in work-products that are still clean, and delete those that are // dirty. - reconcile_work_products(tcx, work_products, &dirty_target_nodes); + reconcile_work_products(tcx, work_products, &clean_work_products); - dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_source_nodes, &retraced); + dirty_clean::check_dirty_clean_annotations(tcx, &dirty_raw_nodes, &retraced); load_prev_metadata_hashes(tcx, &retraced, @@ -238,13 +223,13 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Computes which of the original set of def-ids are dirty. Stored in /// a bit vector where the index is the DefPathIndex. 
-fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - incremental_hashes_map: &IncrementalHashesMap, - serialized_hashes: &[SerializedHash], - retraced: &RetracedDefIdDirectory) - -> DirtyNodes { +fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + incremental_hashes_map: &IncrementalHashesMap, + serialized_hashes: &[SerializedHash], + retraced: &RetracedDefIdDirectory) + -> DirtyNodes { let mut hcx = HashContext::new(tcx, incremental_hashes_map); - let mut dirty_nodes = FxHashSet(); + let mut dirty_nodes = FxHashMap(); for hash in serialized_hashes { if let Some(dep_node) = retraced.map(&hash.dep_node) { @@ -277,21 +262,37 @@ fn dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hash.dep_node); } - dirty_nodes.insert(hash.dep_node.clone()); + dirty_nodes.insert(hash.dep_node.clone(), hash.dep_node.clone()); } dirty_nodes } +fn transitive_dirty_nodes(edges: &[SerializedEdge], + mut dirty_nodes: DirtyNodes) + -> DirtyNodes +{ + let mut len = 0; + while len != dirty_nodes.len() { + len = dirty_nodes.len(); + for edge in edges { + if let Some(n) = dirty_nodes.get(&edge.0).cloned() { + dirty_nodes.insert(edge.1.clone(), n); + } + } + } + dirty_nodes +} + /// Go through the list of work-products produced in the previous run. /// Delete any whose nodes have been found to be dirty or which are /// otherwise no longer applicable. fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, work_products: Vec, - dirty_target_nodes: &FxHashSet>) { + clean_work_products: &FxHashSet>) { debug!("reconcile_work_products({:?})", work_products); for swp in work_products { - if dirty_target_nodes.contains(&DepNode::WorkProduct(swp.id.clone())) { + if !clean_work_products.contains(&swp.id) { debug!("reconcile_work_products: dep-node for {:?} is dirty", swp); delete_dirty_work_product(tcx, swp); } else { diff --git a/src/librustc_incremental/persist/preds.rs b/src/librustc_incremental/persist/preds.rs deleted file mode 100644 index 3daeacfe87d82..0000000000000 --- a/src/librustc_incremental/persist/preds.rs +++ /dev/null @@ -1,366 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::dep_graph::{DepGraphQuery, DepNode}; -use rustc::hir::def_id::DefId; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::bitvec::BitVector; -use rustc_data_structures::graph::{NodeIndex, Graph}; - -use super::hash::*; -use ich::Fingerprint; - -/// A data-structure that makes it easy to enumerate the hashable -/// predecessors of any given dep-node. -pub struct Predecessors<'query> { - // - Keys: dep-nodes that may have work-products, output meta-data - // nodes. 
- // - Values: transitive predecessors of the key that are hashable - // (e.g., HIR nodes, input meta-data nodes) - pub inputs: FxHashMap<&'query DepNode, Vec<&'query DepNode>>, - - // - Keys: some hashable node - // - Values: the hash thereof - pub hashes: FxHashMap<&'query DepNode, Fingerprint>, -} - -impl<'q> Predecessors<'q> { - pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self { - let tcx = hcx.tcx; - - let collect_for_metadata = tcx.sess.opts.debugging_opts.incremental_cc || - tcx.sess.opts.debugging_opts.query_dep_graph; - - // Find nodes for which we want to know the full set of preds - let node_count = query.graph.len_nodes(); - - // Set up some data structures the cache predecessor search needs: - let mut visit_counts: Vec = Vec::new(); - let mut node_cache: Vec>> = Vec::new(); - visit_counts.resize(node_count, 0); - node_cache.resize(node_count, None); - let mut dfs_workspace1 = DfsWorkspace::new(node_count); - let mut dfs_workspace2 = DfsWorkspace::new(node_count); - - let inputs: FxHashMap<_, _> = query - .graph - .all_nodes() - .iter() - .enumerate() - .filter(|&(_, node)| match node.data { - DepNode::WorkProduct(_) => true, - DepNode::MetaData(ref def_id) => collect_for_metadata && def_id.is_local(), - - // if -Z query-dep-graph is passed, save more extended data - // to enable better unit testing - DepNode::TypeckTables(_) | - DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph, - - _ => false, - }) - .map(|(node_index, node)| { - find_roots(&query.graph, - node_index as u32, - &mut visit_counts, - &mut node_cache[..], - HashContext::is_hashable, - &mut dfs_workspace1, - Some(&mut dfs_workspace2)); - - let inputs: Vec<_> = dfs_workspace1.output.nodes.iter().map(|&i| { - query.graph.node_data(NodeIndex(i as usize)) - }).collect(); - - (&node.data, inputs) - }) - .collect(); - - let mut hashes = FxHashMap(); - for input in inputs.values().flat_map(|v| v.iter().cloned()) { - hashes.entry(input) - .or_insert_with(|| hcx.hash(input).unwrap()); - } - - Predecessors { - inputs: inputs, - hashes: hashes, - } - } -} - -const CACHING_THRESHOLD: u32 = 60; - -// Starting at `start_node`, this function finds this node's "roots", that is, -// anything that is hashable, in the dep-graph. It uses a simple depth-first -// search to achieve that. However, since some sub-graphs are traversed over -// and over again, the function also some caching built into it: Each time it -// visits a node it increases a counter for that node. If a node has been -// visited more often than CACHING_THRESHOLD, the function will allocate a -// cache entry in the `cache` array. This cache entry contains a flat list of -// all roots reachable from the given node. The next time the node is visited, -// the search can just add the contents of this array to the output instead of -// recursing further. -// -// The function takes two `DfsWorkspace` arguments. These contains some data -// structures that would be expensive to re-allocate all the time, so they are -// allocated once up-front. There are two of them because building a cache entry -// requires a recursive invocation of this function. Two are enough though, -// since function never recurses more than once. 
-fn find_roots(graph: &Graph, - start_node: u32, - visit_counts: &mut [u32], - cache: &mut [Option>], - is_root: F, - workspace: &mut DfsWorkspace, - mut sub_workspace: Option<&mut DfsWorkspace>) - where F: Copy + Fn(&T) -> bool, - T: ::std::fmt::Debug, -{ - workspace.visited.clear(); - workspace.output.clear(); - workspace.stack.clear(); - workspace.stack.push(start_node); - - loop { - let node = match workspace.stack.pop() { - Some(node) => node, - None => return, - }; - - if !workspace.visited.insert(node as usize) { - continue - } - - if is_root(graph.node_data(NodeIndex(node as usize))) { - // If this is a root, just add it to the output. - workspace.output.insert(node); - } else { - if let Some(ref cached) = cache[node as usize] { - for &n in &cached[..] { - workspace.output.insert(n); - } - // No need to recurse further from this node - continue - } - - visit_counts[node as usize] += 1; - - // If this node has been visited often enough to be cached ... - if visit_counts[node as usize] > CACHING_THRESHOLD { - // ... we are actually allowed to cache something, do so: - if let Some(ref mut sub_workspace) = sub_workspace { - // Note that the following recursive invocation does never - // write to the cache (since we pass None as sub_workspace). - // This is intentional: The graph we are working with - // contains cycles and this prevent us from simply building - // our caches recursively on-demand. - // However, we can just do a regular, non-caching DFS to - // yield the set of roots and cache that. - find_roots(graph, - node, - visit_counts, - cache, - is_root, - sub_workspace, - None); - - for &n in &sub_workspace.output.nodes { - workspace.output.insert(n); - } - - cache[node as usize] = Some(sub_workspace.output - .nodes - .clone() - .into_boxed_slice()); - // No need to recurse further from this node - continue - } - } - - for pred in graph.predecessor_nodes(NodeIndex(node as usize)) { - workspace.stack.push(pred.node_id() as u32); - } - } - } -} - -struct DfsWorkspace { - stack: Vec, - visited: BitVector, - output: NodeIndexSet, -} - -impl DfsWorkspace { - fn new(total_node_count: usize) -> DfsWorkspace { - DfsWorkspace { - stack: Vec::new(), - visited: BitVector::new(total_node_count), - output: NodeIndexSet::new(total_node_count), - } - } -} - -struct NodeIndexSet { - bitset: BitVector, - nodes: Vec, -} - -impl NodeIndexSet { - fn new(total_node_count: usize) -> NodeIndexSet { - NodeIndexSet { - bitset: BitVector::new(total_node_count), - nodes: Vec::new(), - } - } - - #[inline] - fn clear(&mut self) { - self.bitset.clear(); - self.nodes.clear(); - } - - #[inline] - fn insert(&mut self, node: u32) { - if self.bitset.insert(node as usize) { - self.nodes.push(node) - } - } -} - -#[test] -fn test_cached_dfs_acyclic() { - - // 0 1 2 - // | \ / - // 3---+ | - // | | | - // | | | - // 4 5 6 - // \ / \ / \ - // | | | - // 7 8 9 - - let mut g: Graph = Graph::new(); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(true); - g.add_node(true); - g.add_node(true); - - g.add_edge(NodeIndex(3), NodeIndex(0), ()); - g.add_edge(NodeIndex(4), NodeIndex(3), ()); - g.add_edge(NodeIndex(7), NodeIndex(4), ()); - g.add_edge(NodeIndex(5), NodeIndex(3), ()); - g.add_edge(NodeIndex(7), NodeIndex(5), ()); - g.add_edge(NodeIndex(8), NodeIndex(5), ()); - g.add_edge(NodeIndex(8), NodeIndex(6), ()); - g.add_edge(NodeIndex(9), NodeIndex(6), ()); - g.add_edge(NodeIndex(6), NodeIndex(1), ()); - 
g.add_edge(NodeIndex(6), NodeIndex(2), ()); - - let mut ws1 = DfsWorkspace::new(g.len_nodes()); - let mut ws2 = DfsWorkspace::new(g.len_nodes()); - let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect(); - let mut cache: Vec>> = g.all_nodes().iter().map(|_| None).collect(); - - fn is_root(x: &bool) -> bool { *x } - - for _ in 0 .. CACHING_THRESHOLD + 1 { - find_roots(&g, 5, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![7, 8]); - - find_roots(&g, 6, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![8, 9]); - - find_roots(&g, 0, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![7, 8]); - - find_roots(&g, 1, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![8, 9]); - - find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![8, 9]); - - find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![7, 8]); - - find_roots(&g, 4, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); - ws1.output.nodes.sort(); - assert_eq!(ws1.output.nodes, vec![7]); - } -} - -#[test] -fn test_cached_dfs_cyclic() { - - // 0 1 <---- 2 3 - // ^ | ^ ^ - // | v | | - // 4 ----> 5 ----> 6 ----> 7 - // ^ ^ ^ ^ - // | | | | - // 8 9 10 11 - - - let mut g: Graph = Graph::new(); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(false); - g.add_node(true); - g.add_node(true); - g.add_node(true); - g.add_node(true); - - g.add_edge(NodeIndex( 4), NodeIndex(0), ()); - g.add_edge(NodeIndex( 8), NodeIndex(4), ()); - g.add_edge(NodeIndex( 4), NodeIndex(5), ()); - g.add_edge(NodeIndex( 1), NodeIndex(5), ()); - g.add_edge(NodeIndex( 9), NodeIndex(5), ()); - g.add_edge(NodeIndex( 5), NodeIndex(6), ()); - g.add_edge(NodeIndex( 6), NodeIndex(2), ()); - g.add_edge(NodeIndex( 2), NodeIndex(1), ()); - g.add_edge(NodeIndex(10), NodeIndex(6), ()); - g.add_edge(NodeIndex( 6), NodeIndex(7), ()); - g.add_edge(NodeIndex(11), NodeIndex(7), ()); - g.add_edge(NodeIndex( 7), NodeIndex(3), ()); - - let mut ws1 = DfsWorkspace::new(g.len_nodes()); - let mut ws2 = DfsWorkspace::new(g.len_nodes()); - let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect(); - let mut cache: Vec>> = g.all_nodes().iter().map(|_| None).collect(); - - fn is_root(x: &bool) -> bool { *x } - - for _ in 0 .. 
CACHING_THRESHOLD + 1 {
- find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9, 10]);
-
- find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2));
- ws1.output.nodes.sort();
- assert_eq!(ws1.output.nodes, vec![8, 9, 10, 11]);
- }
-}
diff --git a/src/librustc_incremental/persist/preds/compress/README.md b/src/librustc_incremental/persist/preds/compress/README.md
new file mode 100644
index 0000000000000..d2aa245c7c942
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/README.md
@@ -0,0 +1,48 @@
+Graph compression
+
+The graph compression algorithm is intended to reduce the size of the
+dependency graph so it can be saved, while preserving
+everything we care about. In particular, given a set of input/output
+nodes in the graph (which must be disjoint), we ensure that the set of
+input nodes that can reach a given output node does not change,
+although the intermediate nodes may change in various ways. In short,
+the output nodes are intended to be the ones whose existence we care
+about when we start up, because they have some associated data that we
+will try to re-use (and hence if they are dirty, we have to throw that
+data away). The other intermediate nodes don't really matter so much.
+
+### Overview
+
+The algorithm works as follows:
+
+1. Do a single walk of the graph to construct a DAG
+ - in this walk, we identify and unify all cycles, electing a representative "head" node
+ - this is done using the union-find implementation
+ - this code is found in the `classify` module
+2. The result from this walk is a `Dag`:
+ - the set of SCCs, represented by the union-find table
+ - a set of edges in the new DAG, represented by:
+ - a vector of parent nodes for each child node
+ - a vector of cross-edges
+ - once these are canonicalized, some of these edges may turn out to be cyclic edges
+ (i.e., an edge A -> A where A is the head of some SCC)
+3. We pass this `Dag` into the construct code, which then creates a
+ new graph. This graph has a smaller set of indices which includes
+ *at least* the inputs/outputs from the original graph, but may have
+ other nodes as well, if keeping them reduces the overall size of
+ the graph.
+ - This code is found in the `construct` module.
+
+### Some notes
+
+The input graph is assumed to have *read-by* edges, i.e., `A -> B`
+means that the task B reads data from A. But the DAG defined by
+classify is expressed in terms of *reads-from* edges, which are the
+inverse. So `A -> B` is the same as `B -rf-> A`. *reads-from* edges
+are more natural since we want to walk from the outputs to the inputs,
+effectively. When we construct the final graph, we reverse these edges
+back into the *read-by* edges common elsewhere.
+
+
+
+
diff --git a/src/librustc_incremental/persist/preds/compress/classify/mod.rs b/src/librustc_incremental/persist/preds/compress/classify/mod.rs
new file mode 100644
index 0000000000000..f75063f8b9c9a
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/classify/mod.rs
@@ -0,0 +1,139 @@
+//! First phase. Detect cycles and cross-edges.
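The cycle detection this first phase performs rests on union-find: whenever the walk sees a back-edge, every node on the stack from the back-edge target upward is unified into one SCC under a single "head". A minimal toy union-find (rustc instead uses `UnificationTable` keyed by `DagId`, as the files below show) illustrates that merging step:

```rust
// Toy union-find, sketching how `classify` merges every node between a
// back-edge target and the current node into one SCC. Node names and
// indices are invented for this example.
struct UnionFind {
    parent: Vec<usize>,
}

impl UnionFind {
    fn new(n: usize) -> Self {
        UnionFind { parent: (0..n).collect() }
    }

    // Find the representative ("cycle head") of i, compressing paths.
    fn find(&mut self, i: usize) -> usize {
        if self.parent[i] != i {
            let root = self.find(self.parent[i]);
            self.parent[i] = root;
        }
        self.parent[i]
    }

    // mark_cycle: merge the two nodes' classes.
    fn union(&mut self, a: usize, b: usize) {
        let (ra, rb) = (self.find(a), self.find(b));
        self.parent[ra] = rb;
    }
}

fn main() {
    // The C0 <-> C1 cycle from the tests below: walking C0 -> C1 -> C0
    // finds a back-edge to C0, so C0 and C1 are unified under one head.
    let mut uf = UnionFind::new(4); // A=0, C0=1, C1=2, D=3
    uf.union(1, 2);
    assert_eq!(uf.find(1), uf.find(2)); // in_cycle(C0, C1)
    assert_ne!(uf.find(0), uf.find(1)); // A stays outside the cycle
}
```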
+
+use super::*;
+
+#[cfg(test)]
+mod test;
+
+pub struct Classify<'a, 'g: 'a, N: 'g, I: 'a, O: 'a>
+ where N: Debug + Clone + 'g,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ r: &'a mut GraphReduce<'g, N, I, O>,
+ stack: Vec<NodeIndex>,
+ colors: Vec<Color>,
+ dag: Dag,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum Color {
+ // not yet visited
+ White,
+
+ // visiting; usize is index on stack
+ Grey(usize),
+
+ // finished visiting
+ Black,
+}
+
+impl<'a, 'g, N, I, O> Classify<'a, 'g, N, I, O>
+ where N: Debug + Clone + 'g,
+ I: Fn(&N) -> bool,
+ O: Fn(&N) -> bool,
+{
+ pub(super) fn new(r: &'a mut GraphReduce<'g, N, I, O>) -> Self {
+ Classify {
+ r: r,
+ colors: vec![Color::White; r.in_graph.len_nodes()],
+ stack: vec![],
+ dag: Dag {
+ parents: (0..r.in_graph.len_nodes()).map(|i| NodeIndex(i)).collect(),
+ cross_edges: vec![],
+ input_nodes: vec![],
+ output_nodes: vec![],
+ },
+ }
+ }
+
+ pub(super) fn walk(mut self) -> Dag {
+ for (index, node) in self.r.in_graph.all_nodes().iter().enumerate() {
+ if (self.r.is_output)(&node.data) {
+ let index = NodeIndex(index);
+ self.dag.output_nodes.push(index);
+ match self.colors[index.0] {
+ Color::White => self.open(index),
+ Color::Grey(_) => panic!("grey node but have not yet started a walk"),
+ Color::Black => (), // already visited, skip
+ }
+ }
+ }
+
+ // At this point we've identified all the cycles, and we've
+ // constructed a spanning tree over the original graph
+ // (encoded in `self.parents`) as well as a list of
+ // cross-edges that reflect additional edges from the DAG.
+ //
+ // If we convert each node to its `cycle-head` (a
+ // representative choice from each SCC, basically) and then
+ // take the union of `self.parents` and `self.cross_edges`
+ // (after canonicalization), that is basically our DAG.
+ //
+ // Note that both of those may well contain trivial `X -rf-> X`
+ // cycle edges after canonicalization, though. e.g., if you
+ // have a graph `{A -rf-> B, B -rf-> A}`, we will have unioned A and
+ // B, but A will also be B's parent (or vice versa), and hence
+ // when we canonicalize the parent edge it would become `A -rf->
+ // A` (or `B -rf-> B`).
+ self.dag
+ }
+
+ fn open(&mut self, node: NodeIndex) {
+ let index = self.stack.len();
+ self.stack.push(node);
+ self.colors[node.0] = Color::Grey(index);
+ for child in self.r.inputs(node) {
+ self.walk_edge(node, child);
+ }
+ self.stack.pop().unwrap();
+ self.colors[node.0] = Color::Black;
+
+ if (self.r.is_input)(&self.r.in_graph.node_data(node)) {
+ // base inputs should have no inputs
+ assert!(self.r.inputs(node).next().is_none());
+ debug!("input: `{:?}`", self.r.in_graph.node_data(node));
+ self.dag.input_nodes.push(node);
+ }
+ }
+
+ fn walk_edge(&mut self, parent: NodeIndex, child: NodeIndex) {
+ debug!("walk_edge: {:?} -rf-> {:?}, {:?}",
+ self.r.in_graph.node_data(parent),
+ self.r.in_graph.node_data(child),
+ self.colors[child.0]);
+
+ // Ignore self-edges, just in case they exist.
+ if child == parent {
+ return;
+ }
+
+ match self.colors[child.0] {
+ Color::White => {
+ // Not yet visited this node; start walking it.
+ assert_eq!(self.dag.parents[child.0], child);
+ self.dag.parents[child.0] = parent;
+ self.open(child);
+ }
+
+ Color::Grey(stack_index) => {
+ // Back-edge; unify everything on stack between here and `stack_index`
+ // since we are all participating in a cycle
+ assert!(self.stack[stack_index] == child);
+
+ for &n in &self.stack[stack_index..] {
+ debug!("cycle `{:?}` and `{:?}`", self.r.in_graph.node_data(n), self.r.in_graph.node_data(parent));
+ self.r.mark_cycle(n, parent);
+ }
+ }
+
+ Color::Black => {
+ // Cross-edge, record and ignore
+ self.dag.cross_edges.push((parent, child));
+ debug!("cross-edge `{:?} -rf-> {:?}`",
+ self.r.in_graph.node_data(parent),
+ self.r.in_graph.node_data(child));
+ }
+ }
+ }
+}
diff --git a/src/librustc_incremental/persist/preds/compress/classify/test.rs b/src/librustc_incremental/persist/preds/compress/classify/test.rs
new file mode 100644
index 0000000000000..22067a10343d3
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/classify/test.rs
@@ -0,0 +1,79 @@
+use super::*;
+
+#[test]
+fn detect_cycles() {
+ let (graph, nodes) = graph! {
+ A -> C0,
+ A -> C1,
+ B -> C1,
+ C0 -> C1,
+ C1 -> C0,
+ C0 -> D,
+ C1 -> E,
+ };
+ let inputs = ["A", "B"];
+ let outputs = ["D", "E"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ assert!(!reduce.in_cycle(nodes("A"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("B"), nodes("C0")));
+ assert!(reduce.in_cycle(nodes("C0"), nodes("C1")));
+ assert!(!reduce.in_cycle(nodes("D"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("E"), nodes("C0")));
+ assert!(!reduce.in_cycle(nodes("E"), nodes("A")));
+}
+
+/// Regression test for a bug where we forgot to pop nodes off of the stack
+/// as we were walking. In this case, because edges are pushed to the front
+/// of the list, we would visit OUT, then A, then IN, and then close IN (but
+/// forget to pop it). Then we would visit B, C, and then A, which would mark
+/// everything from A to C as a cycle. But since we failed to pop IN, the
+/// stack was `OUT, A, IN, B, C`, so that marked C and IN as being in a cycle.
+#[test]
+fn edge_order1() {
+ let (graph, nodes) = graph! {
+ A -> C,
+ C -> B,
+ B -> A,
+ IN -> B,
+ IN -> A,
+ A -> OUT,
+ };
+ let inputs = ["IN"];
+ let outputs = ["OUT"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ assert!(reduce.in_cycle(nodes("B"), nodes("C")));
+
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("A")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("B")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("C")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT")));
+}
+
+/// Same as `edge_order1` but in reverse order so as to detect a failure
+/// if we were to enqueue edges onto the end of the list instead.
+#[test]
+fn edge_order2() {
+ let (graph, nodes) = graph! {
+ A -> OUT,
+ IN -> A,
+ IN -> B,
+ B -> A,
+ C -> B,
+ A -> C,
+ };
+ let inputs = ["IN"];
+ let outputs = ["OUT"];
+ let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n));
+ Classify::new(&mut reduce).walk();
+
+ assert!(reduce.in_cycle(nodes("B"), nodes("C")));
+
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("A")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("B")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("C")));
+ assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT")));
+}
diff --git a/src/librustc_incremental/persist/preds/compress/construct.rs b/src/librustc_incremental/persist/preds/compress/construct.rs
new file mode 100644
index 0000000000000..b0dcb63758e8c
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/construct.rs
@@ -0,0 +1,201 @@
+//! Second phase. Construct new graph. The previous phase has
+//! converted the input graph into a DAG by detecting and unifying
+//! cycles. It provides us with the following (which is a
+//! representation of the DAG):
+//!
+//! - SCCs, in the form of a union-find repr that can convert each node to
+//! its *cycle head* (an arbitrarily chosen representative from the cycle)
+//! - a vector of *leaf nodes*, just a convenience
+//! - a vector of *parents* for each node (in some cases, nodes have no parents,
+//! or their parent is another member of the same cycle; in that case, the vector
+//! will hold `v[i] == i`, after canonicalization)
+//! - a vector of *cross edges*, meaning additional edges between graph nodes beyond
+//! the parents.
+
+use rustc_data_structures::fx::FxHashMap;
+
+use super::*;
+
+pub(super) fn construct_graph<'g, N, I, O>(r: &mut GraphReduce<'g, N, I, O>, dag: Dag)
+ -> Reduction<'g, N>
+ where N: Debug + Clone, I: Fn(&N) -> bool, O: Fn(&N) -> bool,
+{
+ let Dag { parents: old_parents, input_nodes, output_nodes, cross_edges } = dag;
+ let in_graph = r.in_graph;
+
+ debug!("construct_graph");
+
+ // Create a canonical list of edges; this includes both parent and
+ // cross-edges. We store this in `(target -> Vec<source>)` form.
+ // We call the first edge to any given target its "parent".
+ let mut edges = FxHashMap();
+ let old_parent_edges = old_parents.iter().cloned().zip((0..).map(NodeIndex));
+ for (source, target) in old_parent_edges.chain(cross_edges) {
+ debug!("original edge `{:?} -rf-> {:?}`",
+ in_graph.node_data(source),
+ in_graph.node_data(target));
+ let source = r.cycle_head(source);
+ let target = r.cycle_head(target);
+ if source != target {
+ let v = edges.entry(target).or_insert(vec![]);
+ if !v.contains(&source) {
+ debug!("edge `{:?} -rf-> {:?}` is edge #{} with that target",
+ in_graph.node_data(source),
+ in_graph.node_data(target),
+ v.len());
+ v.push(source);
+ }
+ }
+ }
+ let parent = |ni: NodeIndex| -> NodeIndex {
+ edges[&ni][0]
+ };
+
+ // `retain_map`: a map of those nodes that we will want to
+ // *retain* in the ultimate graph; the key is the node index in
+ // the old graph, the value is the node index in the new
+ // graph. These are nodes in the following categories:
+ //
+ // - inputs
+ // - work-products
+ // - targets of a cross-edge
+ //
+ // The first two categories hopefully make sense. We want the
+ // inputs so we can compare hashes later. We want the
+ // work-products so we can tell precisely when a given
+ // work-product is invalidated. But the last one isn't strictly
+ // needed; we keep cross-target edges so as to minimize the total
+ // graph size.
+ //
+ // Consider a graph like:
+ //
+ // WP0 -rf-> Y
+ // WP1 -rf-> Y
+ // Y -rf-> INPUT0
+ // Y -rf-> INPUT1
+ // Y -rf-> INPUT2
+ // Y -rf-> INPUT3
+ //
+ // Now if we were to remove Y, we would have a total of 8 edges: both WP0 and WP1
+ // depend on INPUT0...INPUT3. As it is, we have 6 edges.
+
+ let mut retain_map = FxHashMap();
+ let mut new_graph = Graph::new();
+
+ {
+ // Start by adding start-nodes and inputs.
+ let retained_nodes = output_nodes.iter().chain(&input_nodes).map(|&n| r.cycle_head(n));
+
+ // Next add in targets of cross-edges. Due to the canonicalization,
+ // some of these may be self-edges or may duplicate the parent
+ // edges, so ignore those.
+ let retained_nodes = retained_nodes.chain(
+ edges.iter()
+ .filter(|&(_, ref sources)| sources.len() > 1)
+ .map(|(&target, _)| target));
+
+ // Now create the new graph, adding in the entries from the map.
+ for n in retained_nodes { + retain_map.entry(n) + .or_insert_with(|| { + let data = in_graph.node_data(n); + debug!("retaining node `{:?}`", data); + new_graph.add_node(data) + }); + } + } + + // Given a cycle-head `ni`, converts it to the closest parent that has + // been retained in the output graph. + let retained_parent = |mut ni: NodeIndex| -> NodeIndex { + loop { + debug!("retained_parent({:?})", in_graph.node_data(ni)); + match retain_map.get(&ni) { + Some(&v) => return v, + None => ni = parent(ni), + } + } + }; + + // Now add in the edges into the graph. + for (&target, sources) in &edges { + if let Some(&r_target) = retain_map.get(&target) { + debug!("adding edges that target `{:?}`", in_graph.node_data(target)); + for &source in sources { + debug!("new edge `{:?} -rf-> {:?}`", + in_graph.node_data(source), + in_graph.node_data(target)); + let r_source = retained_parent(source); + + // NB. In the input graph, we have `a -> b` if b + // **reads from** a. But in the terminology of this + // code, we would describe that edge as `b -> a`, + // because we have edges *from* outputs *to* inputs. + // Therefore, when we create our new graph, we have to + // reverse the edge. + new_graph.add_edge(r_target, r_source, ()); + } + } else { + assert_eq!(sources.len(), 1); + } + } + + // One complication. In some cases, output nodes *may* participate in + // cycles. An example: + // + // [HIR0] [HIR1] + // | | + // v v + // TypeckClosureBody(X) -> ItemSignature(X::SomeClosureInX) + // | ^ | | + // | +-------------------------+ | + // | | + // v v + // Foo Bar + // + // In these cases, the output node may not wind up as the head + // of the cycle, in which case it would be absent from the + // final graph. We don't wish this to happen, therefore we go + // over the list of output nodes again and check for any that + // are not their own cycle-head. If we find such a node, we + // add it to the graph now with an edge from the cycle head. + // So the graph above could get transformed into this: + // + // [HIR0, HIR1] + // | + // v + // TypeckClosureBody(X) ItemSignature(X::SomeClosureInX) + // ^ | | + // +-------------------------+ | + // v + // [Foo, Bar] + // + // (Note that all the edges here are "read-by" edges, not + // "reads-from" edges.) + for &output_node in &output_nodes { + let head = r.cycle_head(output_node); + if output_node == head { + assert!(retain_map.contains_key(&output_node)); + } else { + assert!(!retain_map.contains_key(&output_node)); + let output_data = in_graph.node_data(output_node); + let new_node = new_graph.add_node(output_data); + let new_head_node = retain_map[&head]; + new_graph.add_edge(new_head_node, new_node, ()); + } + } + + // Finally, prepare a list of the input node indices as found in + // the new graph. Note that since all input nodes are leaves in + // the graph, they should never participate in a cycle. 
+    let input_nodes =
+        input_nodes.iter()
+                   .map(|&n| {
+                       assert_eq!(r.cycle_head(n), n, "input node participating in a cycle");
+                       retain_map[&n]
+                   })
+                   .collect();
+
+    Reduction { graph: new_graph, input_nodes: input_nodes }
+}
+
diff --git a/src/librustc_incremental/persist/preds/compress/dag_id.rs b/src/librustc_incremental/persist/preds/compress/dag_id.rs
new file mode 100644
index 0000000000000..5ebabc98fda94
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/dag_id.rs
@@ -0,0 +1,33 @@
+use rustc_data_structures::graph::NodeIndex;
+use rustc_data_structures::unify::UnifyKey;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub struct DagId {
+    index: u32,
+}
+
+impl DagId {
+    pub fn from_in_index(n: NodeIndex) -> Self {
+        DagId { index: n.0 as u32 }
+    }
+
+    pub fn as_in_index(&self) -> NodeIndex {
+        NodeIndex(self.index as usize)
+    }
+}
+
+impl UnifyKey for DagId {
+    type Value = ();
+
+    fn index(&self) -> u32 {
+        self.index
+    }
+
+    fn from_index(u: u32) -> Self {
+        DagId { index: u }
+    }
+
+    fn tag(_: Option<Self>) -> &'static str {
+        "DagId"
+    }
+}
diff --git a/src/librustc_incremental/persist/preds/compress/mod.rs b/src/librustc_incremental/persist/preds/compress/mod.rs
new file mode 100644
index 0000000000000..ecd6d72e22d50
--- /dev/null
+++ b/src/librustc_incremental/persist/preds/compress/mod.rs
@@ -0,0 +1,125 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Graph compression. See `README.md`.
+
+use rustc_data_structures::graph::{Graph, NodeIndex};
+use rustc_data_structures::unify::UnificationTable;
+use std::fmt::Debug;
+
+#[cfg(test)]
+#[macro_use]
+mod test_macro;
+
+mod construct;
+
+mod classify;
+use self::classify::Classify;
+
+mod dag_id;
+use self::dag_id::DagId;
+
+#[cfg(test)]
+mod test;
+
+pub fn reduce_graph<'q, N, I, O>(graph: &'q Graph<N, ()>,
+                                 is_input: I,
+                                 is_output: O) -> Reduction<'q, N>
+    where N: Debug + Clone,
+          I: Fn(&N) -> bool,
+          O: Fn(&N) -> bool,
+{
+    GraphReduce::new(graph, is_input, is_output).compute()
+}
+
+pub struct Reduction<'q, N> where N: 'q + Debug + Clone {
+    pub graph: Graph<&'q N, ()>,
+    pub input_nodes: Vec<NodeIndex>,
+}
+
+struct GraphReduce<'q, N, I, O>
+    where N: 'q + Debug + Clone,
+          I: Fn(&N) -> bool,
+          O: Fn(&N) -> bool,
+{
+    in_graph: &'q Graph<N, ()>,
+    unify: UnificationTable<DagId>,
+    is_input: I,
+    is_output: O,
+}
+
+struct Dag {
+    // The "parent" of a node is the node which reached it during the
+    // initial DFS. To encode the case of "no parent" (i.e., for the
+    // roots of the walk), we make `parents[i] == i` to start, which
+    // turns out to be convenient. (For example, with five nodes where
+    // the walk reached node 3 from node 1, `parents` would be
+    // `[0, 1, 2, 1, 4]`, and the roots are exactly the `i` with
+    // `parents[i] == i`.)
+    parents: Vec<NodeIndex>,
+
+    // Additional edges beyond the parents.
+ cross_edges: Vec<(NodeIndex, NodeIndex)>, + + // Nodes which we found that are considered "outputs" + output_nodes: Vec, + + // Nodes which we found that are considered "inputs" + input_nodes: Vec, +} + +#[derive(Copy, Clone, PartialEq, Eq, Hash)] +struct DagNode { + in_index: NodeIndex +} + +impl<'q, N, I, O> GraphReduce<'q, N, I, O> + where N: Debug + Clone, + I: Fn(&N) -> bool, + O: Fn(&N) -> bool, +{ + fn new(in_graph: &'q Graph, is_input: I, is_output: O) -> Self { + let mut unify = UnificationTable::new(); + + // create a set of unification keys whose indices + // correspond to the indices from the input graph + for i in 0..in_graph.len_nodes() { + let k = unify.new_key(()); + assert!(k == DagId::from_in_index(NodeIndex(i))); + } + + GraphReduce { in_graph, unify, is_input, is_output } + } + + fn compute(mut self) -> Reduction<'q, N> { + let dag = Classify::new(&mut self).walk(); + construct::construct_graph(&mut self, dag) + } + + fn inputs(&self, in_node: NodeIndex) -> impl Iterator + 'q { + self.in_graph.predecessor_nodes(in_node) + } + + fn mark_cycle(&mut self, in_node1: NodeIndex, in_node2: NodeIndex) { + let dag_id1 = DagId::from_in_index(in_node1); + let dag_id2 = DagId::from_in_index(in_node2); + self.unify.union(dag_id1, dag_id2); + } + + /// Convert a dag-id into its cycle head representative. This will + /// be a no-op unless `in_node` participates in a cycle, in which + /// case a distinct node *may* be returned. + fn cycle_head(&mut self, in_node: NodeIndex) -> NodeIndex { + let i = DagId::from_in_index(in_node); + self.unify.find(i).as_in_index() + } + + #[cfg(test)] + fn in_cycle(&mut self, ni1: NodeIndex, ni2: NodeIndex) -> bool { + self.cycle_head(ni1) == self.cycle_head(ni2) + } +} diff --git a/src/librustc_incremental/persist/preds/compress/test.rs b/src/librustc_incremental/persist/preds/compress/test.rs new file mode 100644 index 0000000000000..6ac834ee53fbc --- /dev/null +++ b/src/librustc_incremental/persist/preds/compress/test.rs @@ -0,0 +1,243 @@ +use super::*; + +fn reduce(graph: &Graph<&'static str, ()>, + inputs: &[&'static str], + outputs: &[&'static str], + expected: &[&'static str]) +{ + let reduce = GraphReduce::new(&graph, + |n| inputs.contains(n), + |n| outputs.contains(n)); + let result = reduce.compute(); + let mut edges: Vec = + result.graph + .all_edges() + .iter() + .map(|edge| format!("{} -> {}", + result.graph.node_data(edge.source()), + result.graph.node_data(edge.target()))) + .collect(); + edges.sort(); + println!("{:#?}", edges); + assert_eq!(edges.len(), expected.len()); + for (expected, actual) in expected.iter().zip(&edges) { + assert_eq!(expected, actual); + } +} + +#[test] +fn test1() { + // +---------------+ + // | | + // | +--------|------+ + // | | v v + // [A] -> [C0] -> [C1] [D] + // [ ] <- [ ] -> [E] + // ^ + // [B] -------------+ + let (graph, _nodes) = graph! { + A -> C0, + A -> C1, + B -> C1, + C0 -> C1, + C1 -> C0, + C0 -> D, + C1 -> E, + }; + + // [A] -> [C1] -> [D] + // [B] -> [ ] -> [E] + reduce(&graph, &["A", "B"], &["D", "E"], &[ + "A -> C1", + "B -> C1", + "C1 -> D", + "C1 -> E", + ]); +} + +#[test] +fn test2() { + // +---------------+ + // | | + // | +--------|------+ + // | | v v + // [A] -> [C0] -> [C1] [D] -> [E] + // [ ] <- [ ] + // ^ + // [B] -------------+ + let (graph, _nodes) = graph! 
{ + A -> C0, + A -> C1, + B -> C1, + C0 -> C1, + C1 -> C0, + C0 -> D, + D -> E, + }; + + // [A] -> [D] -> [E] + // [B] -> [ ] + reduce(&graph, &["A", "B"], &["D", "E"], &[ + "A -> D", + "B -> D", + "D -> E", + ]); +} + +#[test] +fn test2b() { + // Variant on test2 in which [B] is not + // considered an input. + let (graph, _nodes) = graph! { + A -> C0, + A -> C1, + B -> C1, + C0 -> C1, + C1 -> C0, + C0 -> D, + D -> E, + }; + + // [A] -> [D] -> [E] + reduce(&graph, &["A"], &["D", "E"], &[ + "A -> D", + "D -> E", + ]); +} + +#[test] +fn test3() { + + // Edges going *downwards*, so 0, 1 and 2 are inputs, + // while 7, 8, and 9 are outputs. + // + // 0 1 2 + // | \ / + // 3---+ | + // | | | + // | | | + // 4 5 6 + // \ / \ / \ + // | | | + // 7 8 9 + // + // Here the end result removes node 4, instead encoding an edge + // from n3 -> n7, but keeps nodes 5 and 6, as they are common + // inputs to nodes 8/9. + + let (graph, _nodes) = graph! { + n0 -> n3, + n3 -> n4, + n3 -> n5, + n4 -> n7, + n5 -> n7, + n5 -> n8, + n1 -> n6, + n2 -> n6, + n6 -> n8, + n6 -> n9, + }; + + reduce(&graph, &["n0", "n1", "n2"], &["n7", "n8", "n9"], &[ + "n0 -> n3", + "n1 -> n6", + "n2 -> n6", + "n3 -> n5", + "n3 -> n7", + "n5 -> n7", + "n5 -> n8", + "n6 -> n8", + "n6 -> n9" + ]); +} + +//#[test] +//fn test_cached_dfs_cyclic() { +// +// // 0 1 <---- 2 3 +// // ^ | ^ ^ +// // | v | | +// // 4 ----> 5 ----> 6 ----> 7 +// // ^ ^ ^ ^ +// // | | | | +// // 8 9 10 11 +// +// +// let mut g: Graph = Graph::new(); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(false); +// g.add_node(true); +// g.add_node(true); +// g.add_node(true); +// g.add_node(true); +// +// g.add_edge(NodeIndex( 4), NodeIndex(0), ()); +// g.add_edge(NodeIndex( 8), NodeIndex(4), ()); +// g.add_edge(NodeIndex( 4), NodeIndex(5), ()); +// g.add_edge(NodeIndex( 1), NodeIndex(5), ()); +// g.add_edge(NodeIndex( 9), NodeIndex(5), ()); +// g.add_edge(NodeIndex( 5), NodeIndex(6), ()); +// g.add_edge(NodeIndex( 6), NodeIndex(2), ()); +// g.add_edge(NodeIndex( 2), NodeIndex(1), ()); +// g.add_edge(NodeIndex(10), NodeIndex(6), ()); +// g.add_edge(NodeIndex( 6), NodeIndex(7), ()); +// g.add_edge(NodeIndex(11), NodeIndex(7), ()); +// g.add_edge(NodeIndex( 7), NodeIndex(3), ()); +// +// let mut ws1 = DfsWorkspace::new(g.len_nodes()); +// let mut ws2 = DfsWorkspace::new(g.len_nodes()); +// let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect(); +// let mut cache: Vec>> = g.all_nodes().iter().map(|_| None).collect(); +// +// fn is_root(x: &bool) -> bool { *x } +// +// for _ in 0 .. CACHING_THRESHOLD + 1 { +// find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); +// ws1.output.nodes.sort(); +// assert_eq!(ws1.output.nodes, vec![8, 9, 10]); +// +// find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); +// ws1.output.nodes.sort(); +// assert_eq!(ws1.output.nodes, vec![8, 9, 10, 11]); +// } +//} + +#[test] +fn test_cycle_output() { + // +---------------+ + // | | + // | +--------|------+ + // | | v v + // [A] -> [C0] <-> [C1] <- [D] + // +----> [E] + // ^ + // [B] ----------------- ---+ + let (graph, _nodes) = graph! 
{ + A -> C0, + A -> C1, + B -> E, + C0 -> C1, + C1 -> C0, + C0 -> D, + C1 -> E, + D -> C1, + }; + + // [A] -> [C0] <-> [D] + // +----> [E] + // ^ + // [B] -------------+ + reduce(&graph, &["A", "B"], &["D", "E"], &[ + "A -> C0", + "B -> E", + "C0 -> D", + "C0 -> E", + "D -> C0" + ]); +} diff --git a/src/librustc_incremental/persist/preds/compress/test_macro.rs b/src/librustc_incremental/persist/preds/compress/test_macro.rs new file mode 100644 index 0000000000000..66712c018d06d --- /dev/null +++ b/src/librustc_incremental/persist/preds/compress/test_macro.rs @@ -0,0 +1,40 @@ +macro_rules! graph { + ($( $source:ident -> $target:ident, )*) => { + { + use $crate::rustc_data_structures::graph::{Graph, NodeIndex}; + use $crate::rustc_data_structures::fx::FxHashMap; + + let mut graph = Graph::new(); + let mut nodes: FxHashMap<&'static str, NodeIndex> = FxHashMap(); + + for &name in &[ $(stringify!($source), stringify!($target)),* ] { + let name: &'static str = name; + nodes.entry(name) + .or_insert_with(|| graph.add_node(name)); + } + + $( + { + let source = nodes[&stringify!($source)]; + let target = nodes[&stringify!($target)]; + graph.add_edge(source, target, ()); + } + )* + + let f = move |name: &'static str| -> NodeIndex { nodes[&name] }; + + (graph, f) + } + } +} + +macro_rules! set { + ($( $value:expr ),*) => { + { + use $crate::rustc_data_structures::fx::FxHashSet; + let mut set = FxHashSet(); + $(set.insert($value);)* + set + } + } +} diff --git a/src/librustc_incremental/persist/preds/mod.rs b/src/librustc_incremental/persist/preds/mod.rs new file mode 100644 index 0000000000000..a80620fbde66f --- /dev/null +++ b/src/librustc_incremental/persist/preds/mod.rs @@ -0,0 +1,75 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::dep_graph::{DepGraphQuery, DepNode}; +use rustc::hir::def_id::DefId; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::graph::Graph; + +use super::hash::*; +use ich::Fingerprint; + +mod compress; + +/// A data-structure that makes it easy to enumerate the hashable +/// predecessors of any given dep-node. +pub struct Predecessors<'query> { + // A reduced version of the input graph that contains fewer nodes. + // This is intended to keep all of the base inputs (i.e., HIR + // nodes) and all of the "work-products" we may care about + // later. Other nodes may be retained if it keeps the overall size + // of the graph down. + pub reduced_graph: Graph<&'query DepNode, ()>, + + // For the inputs (hir/foreign-metadata), we include hashes. + pub hashes: FxHashMap<&'query DepNode, Fingerprint>, +} + +impl<'q> Predecessors<'q> { + pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self { + let tcx = hcx.tcx; + + let collect_for_metadata = tcx.sess.opts.debugging_opts.incremental_cc || + tcx.sess.opts.debugging_opts.query_dep_graph; + + // Find the set of "start nodes". These are nodes that we will + // possibly query later. 
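The closure defined just below picks those start nodes out. As a rough standalone illustration of how such predicate closures select inputs and outputs for the reduction (a hypothetical `Node` enum, not the real `DepNode`):

    #[derive(Debug, Clone)]
    enum Node { Input(u32), Transform(u32), WorkProduct(u32) }

    // Inputs are the hashable leaves; outputs are the nodes we will
    // query later (work-products in the real code).
    fn is_input(n: &Node) -> bool {
        match *n { Node::Input(_) => true, _ => false }
    }

    fn is_output(n: &Node) -> bool {
        match *n { Node::WorkProduct(_) => true, _ => false }
    }

    fn main() {
        let nodes = vec![Node::Input(0), Node::Transform(1), Node::WorkProduct(2)];
        let outputs: Vec<&Node> = nodes.iter().filter(|n| is_output(n)).collect();
        let inputs: Vec<&Node> = nodes.iter().filter(|n| is_input(n)).collect();
        assert_eq!(outputs.len(), 1); // only the work-product is a start node
        assert_eq!(inputs.len(), 1);  // only the input is hashed
    }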
+ let is_output = |node: &DepNode| -> bool { + match *node { + DepNode::WorkProduct(_) => true, + DepNode::MetaData(ref def_id) => collect_for_metadata && def_id.is_local(), + + // if -Z query-dep-graph is passed, save more extended data + // to enable better unit testing + DepNode::TypeckTables(_) | + DepNode::TransCrateItem(_) => tcx.sess.opts.debugging_opts.query_dep_graph, + + _ => false, + } + }; + + // Reduce the graph to the most important nodes. + let compress::Reduction { graph, input_nodes } = + compress::reduce_graph(&query.graph, HashContext::is_hashable, is_output); + + let mut hashes = FxHashMap(); + for input_index in input_nodes { + let input = *graph.node_data(input_index); + debug!("computing hash for input node `{:?}`", input); + hashes.entry(input) + .or_insert_with(|| hcx.hash(input).unwrap()); + } + + Predecessors { + reduced_graph: graph, + hashes: hashes, + } + } +} diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index f626905f27d4a..80f51700d60e4 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -14,6 +14,7 @@ use rustc::hir::svh::Svh; use rustc::session::Session; use rustc::ty::TyCtxt; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::graph::{NodeIndex, INCOMING}; use rustc_serialize::Encodable as RustcEncodable; use rustc_serialize::opaque::Encoder; use std::hash::Hash; @@ -178,7 +179,9 @@ pub fn encode_dep_graph(preds: &Predecessors, // Create a flat list of (Input, WorkProduct) edges for // serialization. let mut edges = vec![]; - for (&target, sources) in &preds.inputs { + for edge in preds.reduced_graph.all_edges() { + let source = *preds.reduced_graph.node_data(edge.source()); + let target = *preds.reduced_graph.node_data(edge.target()); match *target { DepNode::MetaData(ref def_id) => { // Metadata *targets* are always local metadata nodes. We have @@ -188,11 +191,10 @@ pub fn encode_dep_graph(preds: &Predecessors, } _ => (), } + debug!("serialize edge: {:?} -> {:?}", source, target); + let source = builder.map(source); let target = builder.map(target); - for &source in sources { - let source = builder.map(source); - edges.push((source, target.clone())); - } + edges.push((source, target)); } if tcx.sess.opts.debugging_opts.incremental_dump_hash { @@ -250,12 +252,10 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, let mut def_id_hashes = FxHashMap(); - for (&target, sources) in &preds.inputs { - let def_id = match *target { - DepNode::MetaData(def_id) => { - assert!(def_id.is_local()); - def_id - } + for (index, target) in preds.reduced_graph.all_nodes().iter().enumerate() { + let index = NodeIndex(index); + let def_id = match *target.data { + DepNode::MetaData(def_id) if def_id.is_local() => def_id, _ => continue, }; @@ -281,13 +281,17 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, // is the det. hash of the def-path. This is convenient // because we can sort this to get a stable ordering across // compilations, even if the def-ids themselves have changed. 
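A toy version of that stable-ordering trick, with a made-up `u64` standing in for the deterministic def-path hash: sorting by a content-derived key yields the same order across compilations even when the underlying ids are renumbered.

    fn main() {
        // (stable def-path hash, item fingerprint) pairs; values are made up.
        let mut hashes: Vec<(u64, u64)> = vec![(0xdead_beef, 101), (0x0bad_cafe, 202)];
        hashes.sort(); // order depends only on the content-derived keys
        assert_eq!(hashes[0], (0x0bad_cafe, 202));
    }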
- let mut hashes: Vec<(DepNode, Fingerprint)> = sources.iter() - .map(|dep_node| { - let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))).unwrap(); - let hash = preds.hashes[dep_node]; - (hash_dep_node, hash) - }) - .collect(); + let mut hashes: Vec<(DepNode, Fingerprint)> = + preds.reduced_graph + .depth_traverse(index, INCOMING) + .map(|index| preds.reduced_graph.node_data(index)) + .filter(|dep_node| HashContext::is_hashable(dep_node)) + .map(|dep_node| { + let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))).unwrap(); + let hash = preds.hashes[dep_node]; + (hash_dep_node, hash) + }) + .collect(); hashes.sort(); let mut state = IchHasher::new(); @@ -298,9 +302,12 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, if tcx.sess.opts.debugging_opts.incremental_dump_hash { println!("metadata hash for {:?} is {}", def_id, hash); - for dep_node in sources { - println!("metadata hash for {:?} depends on {:?} with hash {}", - def_id, dep_node, preds.hashes[dep_node]); + for pred_index in preds.reduced_graph.depth_traverse(index, INCOMING) { + let dep_node = preds.reduced_graph.node_data(pred_index); + if HashContext::is_hashable(&dep_node) { + println!("metadata hash for {:?} depends on {:?} with hash {}", + def_id, dep_node, preds.hashes[dep_node]); + } } } From b117beeed3e3efa3a3a8ff368d7878fd21906f5b Mon Sep 17 00:00:00 2001 From: Josh Driver Date: Wed, 1 Feb 2017 21:03:09 +1030 Subject: [PATCH 04/18] Move derive macro expansion into the MacroExpander This removes the expand_derives function, and sprinkles the functionality throughout the Invocation Collector, Expander and Resolver. --- src/librustc_resolve/macros.rs | 26 ++ src/libsyntax/ext/base.rs | 10 + src/libsyntax/ext/derive.rs | 218 ++++++++++++++++ src/libsyntax/ext/expand.rs | 110 +++++++- src/libsyntax/lib.rs | 1 + src/libsyntax_ext/deriving/custom.rs | 8 +- src/libsyntax_ext/deriving/decodable.rs | 3 +- src/libsyntax_ext/deriving/encodable.rs | 3 +- src/libsyntax_ext/deriving/mod.rs | 239 +----------------- src/libsyntax_ext/lib.rs | 5 +- .../proc-macro/derive-bad.rs | 2 +- .../proc-macro/load-panic.rs | 2 +- .../deriving-meta-unknown-trait.rs | 2 +- src/test/compile-fail/deriving-primitive.rs | 2 +- src/test/compile-fail/macro-error.rs | 2 +- .../compile-fail/macros-nonfatal-errors.rs | 8 +- src/test/ui/custom-derive/issue-36935.stderr | 2 +- 17 files changed, 378 insertions(+), 265 deletions(-) create mode 100644 src/libsyntax/ext/derive.rs diff --git a/src/librustc_resolve/macros.rs b/src/librustc_resolve/macros.rs index 682b3ff834fad..ea3112b2463f8 100644 --- a/src/librustc_resolve/macros.rs +++ b/src/librustc_resolve/macros.rs @@ -250,6 +250,32 @@ impl<'a> base::Resolver for Resolver<'a> { } result } + + fn resolve_builtin_macro(&mut self, tname: Name) -> Result, Determinacy> { + match self.builtin_macros.get(&tname).cloned() { + Some(binding) => Ok(binding.get_macro(self)), + None => Err(Determinacy::Undetermined), + } + } + + fn resolve_derive_macro(&mut self, scope: Mark, path: &ast::Path, force: bool) + -> Result, Determinacy> { + let ast::Path { span, .. } = *path; + match self.resolve_macro(scope, path, false) { + Ok(ext) => match *ext { + SyntaxExtension::BuiltinDerive(..) | + SyntaxExtension::ProcMacroDerive(..) 
=> Ok(ext), + _ => Err(Determinacy::Determined), + }, + Err(Determinacy::Undetermined) if force => { + let msg = format!("cannot find derive macro `{}` in this scope", path); + let mut err = self.session.struct_span_err(span, &msg); + err.emit(); + Err(Determinacy::Determined) + }, + Err(err) => Err(err), + } + } } impl<'a> Resolver<'a> { diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index 17b0b97468df8..9a717b86d091e 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -536,6 +536,9 @@ pub trait Resolver { fn find_attr_invoc(&mut self, attrs: &mut Vec) -> Option; fn resolve_macro(&mut self, scope: Mark, path: &ast::Path, force: bool) -> Result, Determinacy>; + fn resolve_builtin_macro(&mut self, tname: Name) -> Result, Determinacy>; + fn resolve_derive_macro(&mut self, scope: Mark, path: &ast::Path, force: bool) + -> Result, Determinacy>; } #[derive(Copy, Clone, Debug)] @@ -562,6 +565,13 @@ impl Resolver for DummyResolver { -> Result, Determinacy> { Err(Determinacy::Determined) } + fn resolve_builtin_macro(&mut self, _tname: Name) -> Result, Determinacy> { + Err(Determinacy::Determined) + } + fn resolve_derive_macro(&mut self, _scope: Mark, _path: &ast::Path, _force: bool) + -> Result, Determinacy> { + Err(Determinacy::Determined) + } } #[derive(Clone)] diff --git a/src/libsyntax/ext/derive.rs b/src/libsyntax/ext/derive.rs new file mode 100644 index 0000000000000..946448eaaee99 --- /dev/null +++ b/src/libsyntax/ext/derive.rs @@ -0,0 +1,218 @@ +// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use ast::Name; +use attr; +use ast::{self, NestedMetaItem}; use ext::base::{ExtCtxt, SyntaxExtension}; +use codemap; +use ext::build::AstBuilder; +use feature_gate; +use symbol::Symbol; +use syntax_pos::Span; + +pub fn derive_attr_trait<'a>(cx: &mut ExtCtxt, attr: &'a ast::Attribute) + -> Option<&'a NestedMetaItem> { + if attr.name() != "derive" { + return None; + } + if attr.value_str().is_some() { + cx.span_err(attr.span, "unexpected value in `derive`"); + return None; + } + + let traits = attr.meta_item_list().unwrap_or(&[]); + + if traits.is_empty() { + cx.span_warn(attr.span, "empty trait list in `derive`"); + return None; + } + + return traits.get(0); +} + +pub fn verify_derive_attrs(cx: &mut ExtCtxt, attrs: &[ast::Attribute]) { + for attr in attrs { + if attr.name() != "derive" { + continue; + } + + if attr.value_str().is_some() { + cx.span_err(attr.span, "unexpected value in `derive`"); + } + + let traits = attr.meta_item_list().unwrap_or(&[]).to_owned(); + + if traits.is_empty() { + cx.span_warn(attr.span, "empty trait list in `derive`"); + attr::mark_used(&attr); + continue; + } + for titem in traits { + if titem.word().is_none() { + cx.span_err(titem.span, "malformed `derive` entry"); + } + } + } +} + +#[derive(PartialEq, Debug, Clone, Copy)] +pub enum DeriveType { + Legacy, + ProcMacro, + Builtin +} + +impl DeriveType { + // Classify a derive trait name by resolving the macro. 
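A simplified model of the classification order implemented just below, with the resolver replaced by hypothetical name lists: a legacy `derive_Foo` macro wins, then a builtin derive, and anything else is assumed to be a proc-macro.

    #[derive(PartialEq, Debug)]
    enum DeriveType { Legacy, ProcMacro, Builtin }

    // `legacy_macros` and `builtin_derives` stand in for resolver lookups.
    fn classify(name: &str, legacy_macros: &[&str], builtin_derives: &[&str]) -> DeriveType {
        let legacy_name = format!("derive_{}", name);
        if legacy_macros.contains(&legacy_name.as_str()) {
            DeriveType::Legacy
        } else if builtin_derives.contains(&name) {
            DeriveType::Builtin
        } else {
            DeriveType::ProcMacro
        }
    }

    fn main() {
        let legacy = ["derive_NewType"];
        let builtin = ["Clone", "PartialEq"];
        assert_eq!(classify("NewType", &legacy, &builtin), DeriveType::Legacy);
        assert_eq!(classify("Clone", &legacy, &builtin), DeriveType::Builtin);
        assert_eq!(classify("Serialize", &legacy, &builtin), DeriveType::ProcMacro);
    }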
+ pub fn classify(cx: &mut ExtCtxt, tname: Name) -> DeriveType { + let legacy_derive_name = Symbol::intern(&format!("derive_{}", tname)); + + if let Ok(_) = cx.resolver.resolve_builtin_macro(legacy_derive_name) { + return DeriveType::Legacy; + } + + match cx.resolver.resolve_builtin_macro(tname) { + Ok(ext) => match *ext { + SyntaxExtension::BuiltinDerive(..) => DeriveType::Builtin, + _ => DeriveType::ProcMacro, + }, + Err(_) => DeriveType::ProcMacro, + } + } +} + +pub fn get_derive_attr(cx: &mut ExtCtxt, attrs: &mut Vec, + derive_type: DeriveType) -> Option { + for i in 0..attrs.len() { + if attrs[i].name() != "derive" { + continue; + } + + if attrs[i].value_str().is_some() { + continue; + } + + let mut traits = attrs[i].meta_item_list().unwrap_or(&[]).to_owned(); + + // First, weed out malformed #[derive] + traits.retain(|titem| titem.word().is_some()); + + let mut titem = None; + + // See if we can find a matching trait. + for j in 0..traits.len() { + let tname = match traits[j].name() { + Some(tname) => tname, + _ => continue, + }; + + if DeriveType::classify(cx, tname) == derive_type { + titem = Some(traits.remove(j)); + break; + } + } + + // If we find a trait, remove the trait from the attribute. + if let Some(titem) = titem { + if traits.len() == 0 { + attrs.remove(i); + } else { + let derive = Symbol::intern("derive"); + let mitem = cx.meta_list(titem.span, derive, traits); + attrs[i] = cx.attribute(titem.span, mitem); + } + let derive = Symbol::intern("derive"); + let mitem = cx.meta_list(titem.span, derive, vec![titem]); + return Some(cx.attribute(mitem.span, mitem)); + } + } + return None; +} + +fn allow_unstable(cx: &mut ExtCtxt, span: Span, attr_name: &str) -> Span { + Span { + expn_id: cx.codemap().record_expansion(codemap::ExpnInfo { + call_site: span, + callee: codemap::NameAndSpan { + format: codemap::MacroAttribute(Symbol::intern(attr_name)), + span: Some(span), + allow_internal_unstable: true, + }, + }), + ..span + } +} + +pub fn add_derived_markers(cx: &mut ExtCtxt, attrs: &mut Vec) { + if attrs.is_empty() { + return; + } + + let titems = attrs.iter().filter(|a| { + a.name() == "derive" + }).flat_map(|a| { + a.meta_item_list().unwrap_or(&[]).iter() + }).filter_map(|titem| { + titem.name() + }).collect::>(); + + let span = attrs[0].span; + + if !attrs.iter().any(|a| a.name() == "structural_match") && + titems.iter().any(|t| *t == "PartialEq") && titems.iter().any(|t| *t == "Eq") { + let structural_match = Symbol::intern("structural_match"); + let span = allow_unstable(cx, span, "derive(PartialEq, Eq)"); + let meta = cx.meta_word(span, structural_match); + attrs.push(cx.attribute(span, meta)); + } + + if !attrs.iter().any(|a| a.name() == "rustc_copy_clone_marker") && + titems.iter().any(|t| *t == "Copy") && titems.iter().any(|t| *t == "Clone") { + let structural_match = Symbol::intern("rustc_copy_clone_marker"); + let span = allow_unstable(cx, span, "derive(Copy, Clone)"); + let meta = cx.meta_word(span, structural_match); + attrs.push(cx.attribute(span, meta)); + } +} + +pub fn find_derive_attr(cx: &mut ExtCtxt, attrs: &mut Vec) + -> Option { + verify_derive_attrs(cx, attrs); + get_derive_attr(cx, attrs, DeriveType::Legacy).and_then(|a| { + let titem = derive_attr_trait(cx, &a); + titem.and_then(|titem| { + let tword = titem.word().unwrap(); + let tname = tword.name(); + if !cx.ecfg.enable_custom_derive() { + feature_gate::emit_feature_err( + &cx.parse_sess, + "custom_derive", + titem.span, + feature_gate::GateIssue::Language, + feature_gate::EXPLAIN_CUSTOM_DERIVE + ); 
+ None + } else { + let name = Symbol::intern(&format!("derive_{}", tname)); + if !cx.resolver.is_whitelisted_legacy_custom_derive(name) { + cx.span_warn(titem.span, + feature_gate::EXPLAIN_DEPR_CUSTOM_DERIVE); + } + let mitem = cx.meta_word(titem.span, name); + Some(cx.attribute(mitem.span, mitem)) + } + }) + }).or_else(|| { + get_derive_attr(cx, attrs, DeriveType::ProcMacro) + }).or_else(|| { + add_derived_markers(cx, attrs); + get_derive_attr(cx, attrs, DeriveType::Builtin) + }) +} diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index 01a8c215d47aa..8e7f8830eafbc 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -8,26 +8,27 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use ast::{Block, Ident, Mac_, PatKind}; +use ast::{self, Block, Ident, Mac_, PatKind}; use ast::{Name, MacStmtStyle, StmtKind, ItemKind}; -use ast; -use ext::hygiene::Mark; -use ext::placeholders::{placeholder, PlaceholderExpander}; use attr::{self, HasAttrs}; use codemap::{ExpnInfo, NameAndSpan, MacroBang, MacroAttribute}; -use syntax_pos::{self, Span, ExpnId}; use config::{is_test_or_bench, StripUnconfigured}; use ext::base::*; +use ext::derive::{find_derive_attr, derive_attr_trait}; +use ext::hygiene::Mark; +use ext::placeholders::{placeholder, PlaceholderExpander}; use feature_gate::{self, Features}; use fold; use fold::*; -use parse::{ParseSess, DirectoryOwnership, PResult, filemap_to_tts}; use parse::parser::Parser; use parse::token; +use parse::{ParseSess, DirectoryOwnership, PResult, filemap_to_tts}; use print::pprust; use ptr::P; use std_inject; +use symbol::Symbol; use symbol::keywords; +use syntax_pos::{self, Span, ExpnId}; use tokenstream::{TokenTree, TokenStream}; use util::small_vector::SmallVector; use visit::Visitor; @@ -166,6 +167,10 @@ pub enum InvocationKind { attr: ast::Attribute, item: Annotatable, }, + Derive { + attr: ast::Attribute, + item: Annotatable, + }, } impl Invocation { @@ -173,6 +178,7 @@ impl Invocation { match self.kind { InvocationKind::Bang { span, .. } => span, InvocationKind::Attr { ref attr, .. } => attr.span, + InvocationKind::Derive { ref attr, .. } => attr.span, } } } @@ -250,6 +256,13 @@ impl<'a, 'b> MacroExpander<'a, 'b> { let path = ast::Path::from_ident(attr.span, ident); self.cx.resolver.resolve_macro(scope, &path, force) } + InvocationKind::Derive { ref attr, .. } => { + let titem = derive_attr_trait(self.cx, &attr).unwrap(); + let tname = titem.name().expect("Expected derive macro name"); + let ident = Ident::with_empty_ctxt(tname); + let path = ast::Path::from_ident(attr.span, ident); + self.cx.resolver.resolve_derive_macro(scope, &path, force) + } }; let ext = match resolution { Ok(ext) => Some(ext), @@ -330,6 +343,7 @@ impl<'a, 'b> MacroExpander<'a, 'b> { match invoc.kind { InvocationKind::Bang { .. } => self.expand_bang_invoc(invoc, ext), InvocationKind::Attr { .. } => self.expand_attr_invoc(invoc, ext), + InvocationKind::Derive { .. } => self.expand_derive_invoc(invoc, ext), } } @@ -486,6 +500,71 @@ impl<'a, 'b> MacroExpander<'a, 'b> { }) } + /// Expand a derive invocation. Returns the result of expansion. + fn expand_derive_invoc(&mut self, invoc: Invocation, ext: Rc) -> Expansion { + let Invocation { expansion_kind: kind, .. 
} = invoc; + let (attr, item) = match invoc.kind { + InvocationKind::Derive { attr, item } => (attr, item), + _ => unreachable!(), + }; + + attr::mark_used(&attr); + let titem = derive_attr_trait(self.cx, &attr).unwrap(); + let tname = ast::Ident::with_empty_ctxt(titem.name().unwrap()); + let name = Symbol::intern(&format!("derive({})", tname)); + let mitem = &attr.value; + + self.cx.bt_push(ExpnInfo { + call_site: attr.span, + callee: NameAndSpan { + format: MacroAttribute(attr.name()), + span: Some(attr.span), + allow_internal_unstable: false, + } + }); + + match *ext { + SyntaxExtension::ProcMacroDerive(ref ext) => { + let span = Span { + expn_id: self.cx.codemap().record_expansion(ExpnInfo { + call_site: mitem.span, + callee: NameAndSpan { + format: MacroAttribute(Symbol::intern(&format!("derive({})", tname))), + span: None, + allow_internal_unstable: false, + }, + }), + ..mitem.span + }; + return kind.expect_from_annotatables(ext.expand(self.cx, span, &mitem, item)); + } + SyntaxExtension::BuiltinDerive(func) => { + let span = Span { + expn_id: self.cx.codemap().record_expansion(ExpnInfo { + call_site: titem.span, + callee: NameAndSpan { + format: MacroAttribute(name), + span: None, + allow_internal_unstable: true, + }, + }), + ..titem.span + }; + let mut items = Vec::new(); + func(self.cx, span, &mitem, &item, &mut |a| { + items.push(a) + }); + items.insert(0, item); + return kind.expect_from_annotatables(items); + } + _ => { + let msg = &format!("macro `{}` may not be used for derive attributes", name); + self.cx.span_err(attr.span, &msg); + kind.dummy(attr.span) + } + } + } + fn parse_expansion(&mut self, toks: TokenStream, kind: ExpansionKind, name: Name, span: Span) -> Expansion { let mut parser = self.cx.new_parser_from_tts(&toks.trees().cloned().collect::>()); @@ -595,16 +674,31 @@ impl<'a, 'b> InvocationCollector<'a, 'b> { fn collect_attr(&mut self, attr: ast::Attribute, item: Annotatable, kind: ExpansionKind) -> Expansion { - self.collect(kind, InvocationKind::Attr { attr: attr, item: item }) + let invoc_kind = if attr.name() == "derive" { + if kind == ExpansionKind::TraitItems || kind == ExpansionKind::ImplItems { + self.cx.span_err(attr.span, "`derive` can be only be applied to items"); + return kind.expect_from_annotatables(::std::iter::once(item)); + } + InvocationKind::Derive { attr: attr, item: item } + } else { + InvocationKind::Attr { attr: attr, item: item } + }; + + self.collect(kind, invoc_kind) } // If `item` is an attr invocation, remove and return the macro attribute. 
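The builtin-derive arm above follows a push-callback convention: the expander hands the derive function a callback, collects everything it pushes, and re-inserts the original item at the front. A minimal standalone sketch of that shape (a hypothetical `run_derive` helper with `String` items instead of `Annotatable`):

    fn run_derive(item: String, func: impl Fn(&str, &mut dyn FnMut(String))) -> Vec<String> {
        let mut items = Vec::new();
        func(&item, &mut |a| items.push(a));
        items.insert(0, item); // the annotated item always survives expansion
        items
    }

    fn main() {
        let expanded = run_derive("struct S;".to_string(), |item, push| {
            // A real derive would build an impl from the item's AST.
            push(format!("impl Hypothetical for {}", item));
        });
        assert_eq!(expanded.len(), 2);
        assert_eq!(expanded[0], "struct S;");
    }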
fn classify_item(&mut self, mut item: T) -> (T, Option) { let mut attr = None; + item = item.map_attrs(|mut attrs| { - attr = self.cx.resolver.find_attr_invoc(&mut attrs); + attr = self.cx.resolver.find_attr_invoc(&mut attrs).or_else(|| { + find_derive_attr(self.cx, &mut attrs) + }); + attrs }); + (item, attr) } diff --git a/src/libsyntax/lib.rs b/src/libsyntax/lib.rs index 871e6b3783a41..4a0deefbc4ac5 100644 --- a/src/libsyntax/lib.rs +++ b/src/libsyntax/lib.rs @@ -129,6 +129,7 @@ pub mod print { pub mod ext { pub mod base; pub mod build; + pub mod derive; pub mod expand; pub mod placeholders; pub mod hygiene; diff --git a/src/libsyntax_ext/deriving/custom.rs b/src/libsyntax_ext/deriving/custom.rs index e118ef1ea01f4..974e86a574124 100644 --- a/src/libsyntax_ext/deriving/custom.rs +++ b/src/libsyntax_ext/deriving/custom.rs @@ -54,7 +54,7 @@ impl MultiItemModifier for ProcMacroDerive { Annotatable::Item(item) => item, Annotatable::ImplItem(_) | Annotatable::TraitItem(_) => { - ecx.span_err(span, "proc_macro_derive attributes may only be \ + ecx.span_err(span, "proc-macro derives may only be \ applied to struct/enum items"); return Vec::new() } @@ -63,7 +63,7 @@ impl MultiItemModifier for ProcMacroDerive { ItemKind::Struct(..) | ItemKind::Enum(..) => {}, _ => { - ecx.span_err(span, "proc_macro_derive attributes may only be \ + ecx.span_err(span, "proc-macro derives may only be \ applied to struct/enum items"); return Vec::new() } @@ -81,7 +81,7 @@ impl MultiItemModifier for ProcMacroDerive { let stream = match res { Ok(stream) => stream, Err(e) => { - let msg = "proc_macro_derive attribute panicked"; + let msg = "proc-macro derive panicked"; let mut err = ecx.struct_span_fatal(span, msg); if let Some(s) = e.downcast_ref::() { err.help(&format!("message: {}", s)); @@ -100,7 +100,7 @@ impl MultiItemModifier for ProcMacroDerive { Ok(new_items) => new_items, Err(_) => { // FIXME: handle this better - let msg = "proc_macro_derive produced unparseable tokens"; + let msg = "proc-macro derive produced unparseable tokens"; ecx.struct_span_fatal(span, msg).emit(); panic!(FatalError); } diff --git a/src/libsyntax_ext/deriving/decodable.rs b/src/libsyntax_ext/deriving/decodable.rs index 6359d642d157c..498f2348b80f1 100644 --- a/src/libsyntax_ext/deriving/decodable.rs +++ b/src/libsyntax_ext/deriving/decodable.rs @@ -13,6 +13,7 @@ use deriving; use deriving::generic::*; use deriving::generic::ty::*; +use deriving::warn_if_deprecated; use syntax::ast; use syntax::ast::{Expr, MetaItem, Mutability}; @@ -35,7 +36,7 @@ pub fn expand_deriving_decodable(cx: &mut ExtCtxt, mitem: &MetaItem, item: &Annotatable, push: &mut FnMut(Annotatable)) { - deriving::warn_if_deprecated(cx, span, "Decodable"); + warn_if_deprecated(cx, span, "Decodable"); expand_deriving_decodable_imp(cx, span, mitem, item, push, "serialize") } diff --git a/src/libsyntax_ext/deriving/encodable.rs b/src/libsyntax_ext/deriving/encodable.rs index a276193e81b97..9d155c22ad031 100644 --- a/src/libsyntax_ext/deriving/encodable.rs +++ b/src/libsyntax_ext/deriving/encodable.rs @@ -91,6 +91,7 @@ use deriving; use deriving::generic::*; use deriving::generic::ty::*; +use deriving::warn_if_deprecated; use syntax::ast::{Expr, ExprKind, MetaItem, Mutability}; use syntax::ext::base::{Annotatable, ExtCtxt}; @@ -112,7 +113,7 @@ pub fn expand_deriving_encodable(cx: &mut ExtCtxt, mitem: &MetaItem, item: &Annotatable, push: &mut FnMut(Annotatable)) { - deriving::warn_if_deprecated(cx, span, "Encodable"); + warn_if_deprecated(cx, span, "Encodable"); 
expand_deriving_encodable_imp(cx, span, mitem, item, push, "serialize") } diff --git a/src/libsyntax_ext/deriving/mod.rs b/src/libsyntax_ext/deriving/mod.rs index 311b8ae41f8b9..3bceb02f3d6c5 100644 --- a/src/libsyntax_ext/deriving/mod.rs +++ b/src/libsyntax_ext/deriving/mod.rs @@ -11,12 +11,10 @@ //! The compiler code necessary to implement the `#[derive]` extensions. use std::rc::Rc; -use syntax::ast::{self, MetaItem}; -use syntax::attr::HasAttrs; +use syntax::ast; use syntax::codemap; use syntax::ext::base::{Annotatable, ExtCtxt, SyntaxExtension, Resolver}; use syntax::ext::build::AstBuilder; -use syntax::feature_gate; use syntax::ptr::P; use syntax::symbol::Symbol; use syntax_pos::Span; @@ -90,241 +88,6 @@ fn allow_unstable(cx: &mut ExtCtxt, span: Span, attr_name: &str) -> Span { } } -pub fn expand_derive(cx: &mut ExtCtxt, - span: Span, - mitem: &MetaItem, - annotatable: Annotatable) - -> Vec { - debug!("expand_derive: span = {:?}", span); - debug!("expand_derive: mitem = {:?}", mitem); - debug!("expand_derive: annotatable input = {:?}", annotatable); - let mut item = match annotatable { - Annotatable::Item(item) => item, - other => { - cx.span_err(span, "`derive` can only be applied to items"); - return vec![other] - } - }; - - let derive = Symbol::intern("derive"); - let mut derive_attrs = Vec::new(); - item = item.map_attrs(|attrs| { - let partition = attrs.into_iter().partition(|attr| attr.name() == derive); - derive_attrs = partition.0; - partition.1 - }); - - // Expand `#[derive]`s after other attribute macro invocations. - if cx.resolver.find_attr_invoc(&mut item.attrs.clone()).is_some() { - return vec![Annotatable::Item(item.map_attrs(|mut attrs| { - attrs.push(cx.attribute(span, mitem.clone())); - attrs.extend(derive_attrs); - attrs - }))]; - } - - let get_traits = |mitem: &MetaItem, cx: &ExtCtxt| { - if mitem.value_str().is_some() { - cx.span_err(mitem.span, "unexpected value in `derive`"); - } - - let traits = mitem.meta_item_list().unwrap_or(&[]).to_owned(); - if traits.is_empty() { - cx.span_warn(mitem.span, "empty trait list in `derive`"); - } - traits - }; - - let mut traits = get_traits(mitem, cx); - for derive_attr in derive_attrs { - traits.extend(get_traits(&derive_attr.value, cx)); - } - - // First, weed out malformed #[derive] - traits.retain(|titem| { - if titem.word().is_none() { - cx.span_err(titem.span, "malformed `derive` entry"); - false - } else { - true - } - }); - - // Next, check for old-style #[derive(Foo)] - // - // These all get expanded to `#[derive_Foo]` and will get expanded first. If - // we actually add any attributes here then we return to get those expanded - // and then eventually we'll come back to finish off the other derive modes. 
- let mut new_attributes = Vec::new(); - traits.retain(|titem| { - let tword = titem.word().unwrap(); - let tname = tword.name(); - - if is_builtin_trait(tname) || { - let derive_mode = ast::Path::from_ident(titem.span, ast::Ident::with_empty_ctxt(tname)); - cx.resolver.resolve_macro(cx.current_expansion.mark, &derive_mode, false).map(|ext| { - if let SyntaxExtension::ProcMacroDerive(_) = *ext { true } else { false } - }).unwrap_or(false) - } { - return true; - } - - if !cx.ecfg.enable_custom_derive() { - feature_gate::emit_feature_err(&cx.parse_sess, - "custom_derive", - titem.span, - feature_gate::GateIssue::Language, - feature_gate::EXPLAIN_CUSTOM_DERIVE); - } else { - let name = Symbol::intern(&format!("derive_{}", tname)); - if !cx.resolver.is_whitelisted_legacy_custom_derive(name) { - cx.span_warn(titem.span, feature_gate::EXPLAIN_DEPR_CUSTOM_DERIVE); - } - let mitem = cx.meta_word(titem.span, name); - new_attributes.push(cx.attribute(mitem.span, mitem)); - } - false - }); - if new_attributes.len() > 0 { - item = item.map(|mut i| { - i.attrs.extend(new_attributes); - if traits.len() > 0 { - let list = cx.meta_list(mitem.span, derive, traits); - i.attrs.push(cx.attribute(mitem.span, list)); - } - i - }); - return vec![Annotatable::Item(item)] - } - - // Now check for macros-1.1 style custom #[derive]. - // - // Expand each of them in order given, but *before* we expand any built-in - // derive modes. The logic here is to: - // - // 1. Collect the remaining `#[derive]` annotations into a list. If - // there are any left, attach a `#[derive]` attribute to the item - // that we're currently expanding with the remaining derive modes. - // 2. Manufacture a `#[derive(Foo)]` attribute to pass to the expander. - // 3. Expand the current item we're expanding, getting back a list of - // items that replace it. - // 4. Extend the returned list with the current list of items we've - // collected so far. - // 5. Return everything! - // - // If custom derive extensions end up threading through the `#[derive]` - // attribute, we'll get called again later on to continue expanding - // those modes. - let macros_11_derive = traits.iter() - .cloned() - .enumerate() - .filter(|&(_, ref name)| !is_builtin_trait(name.name().unwrap())) - .next(); - if let Some((i, titem)) = macros_11_derive { - let tname = ast::Ident::with_empty_ctxt(titem.name().unwrap()); - let path = ast::Path::from_ident(titem.span, tname); - let ext = cx.resolver.resolve_macro(cx.current_expansion.mark, &path, false).unwrap(); - - traits.remove(i); - if traits.len() > 0 { - item = item.map(|mut i| { - let list = cx.meta_list(mitem.span, derive, traits); - i.attrs.push(cx.attribute(mitem.span, list)); - i - }); - } - let titem = cx.meta_list_item_word(titem.span, titem.name().unwrap()); - let mitem = cx.meta_list(titem.span, derive, vec![titem]); - let item = Annotatable::Item(item); - - let span = Span { - expn_id: cx.codemap().record_expansion(codemap::ExpnInfo { - call_site: mitem.span, - callee: codemap::NameAndSpan { - format: codemap::MacroAttribute(Symbol::intern(&format!("derive({})", tname))), - span: None, - allow_internal_unstable: false, - }, - }), - ..mitem.span - }; - - if let SyntaxExtension::ProcMacroDerive(ref ext) = *ext { - return ext.expand(cx, span, &mitem, item); - } else { - unreachable!() - } - } - - // Ok, at this point we know that there are no old-style `#[derive_Foo]` nor - // any macros-1.1 style `#[derive(Foo)]`. Expand all built-in traits here. - - // RFC #1445. 
`#[derive(PartialEq, Eq)]` adds a (trusted) - // `#[structural_match]` attribute. - let (partial_eq, eq) = (Symbol::intern("PartialEq"), Symbol::intern("Eq")); - if traits.iter().any(|t| t.name() == Some(partial_eq)) && - traits.iter().any(|t| t.name() == Some(eq)) { - let structural_match = Symbol::intern("structural_match"); - let span = allow_unstable(cx, span, "derive(PartialEq, Eq)"); - let meta = cx.meta_word(span, structural_match); - item = item.map(|mut i| { - i.attrs.push(cx.attribute(span, meta)); - i - }); - } - - // RFC #1521. `Clone` can assume that `Copy` types' clone implementation is - // the same as the copy implementation. - // - // Add a marker attribute here picked up during #[derive(Clone)] - let (copy, clone) = (Symbol::intern("Copy"), Symbol::intern("Clone")); - if traits.iter().any(|t| t.name() == Some(clone)) && - traits.iter().any(|t| t.name() == Some(copy)) { - let marker = Symbol::intern("rustc_copy_clone_marker"); - let span = allow_unstable(cx, span, "derive(Copy, Clone)"); - let meta = cx.meta_word(span, marker); - item = item.map(|mut i| { - i.attrs.push(cx.attribute(span, meta)); - i - }); - } - - let mut items = Vec::new(); - for titem in traits.iter() { - let tname = titem.word().unwrap().name(); - let name = Symbol::intern(&format!("derive({})", tname)); - let tname_cx = ast::Ident::with_empty_ctxt(titem.name().unwrap()); - let mitem = cx.meta_word(titem.span, name); - let path = ast::Path::from_ident(titem.span, tname_cx); - let ext = cx.resolver.resolve_macro(cx.current_expansion.mark, &path, false).unwrap(); - - let span = Span { - expn_id: cx.codemap().record_expansion(codemap::ExpnInfo { - call_site: titem.span, - callee: codemap::NameAndSpan { - format: codemap::MacroAttribute(name), - span: None, - allow_internal_unstable: true, - }, - }), - ..titem.span - }; - - if let SyntaxExtension::BuiltinDerive(ref func) = *ext { - let my_item = Annotatable::Item(item); - func(cx, span, &mitem, &my_item, &mut |a| { - items.push(a) - }); - item = my_item.expect_item(); - } else { - unreachable!(); - } - } - - items.insert(0, Annotatable::Item(item)); - return items -} - macro_rules! 
derive_traits { ($( $name:expr => $func:path, )+) => { pub fn is_builtin_trait(name: ast::Name) -> bool { diff --git a/src/libsyntax_ext/lib.rs b/src/libsyntax_ext/lib.rs index e872cfaeacb7b..7533171b08556 100644 --- a/src/libsyntax_ext/lib.rs +++ b/src/libsyntax_ext/lib.rs @@ -24,7 +24,6 @@ #![feature(staged_api)] extern crate fmt_macros; -#[macro_use] extern crate log; #[macro_use] extern crate syntax; @@ -51,7 +50,7 @@ pub mod proc_macro_impl; use std::rc::Rc; use syntax::ast; -use syntax::ext::base::{MacroExpanderFn, NormalTT, MultiModifier, NamedSyntaxExtension}; +use syntax::ext::base::{MacroExpanderFn, NormalTT, NamedSyntaxExtension}; use syntax::symbol::Symbol; pub fn register_builtins(resolver: &mut syntax::ext::base::Resolver, @@ -114,8 +113,6 @@ pub fn register_builtins(resolver: &mut syntax::ext::base::Resolver, register(Symbol::intern("format_args"), NormalTT(Box::new(format::expand_format_args), None, true)); - register(Symbol::intern("derive"), MultiModifier(Box::new(deriving::expand_derive))); - for (name, ext) in user_exts { register(name, ext); } diff --git a/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs b/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs index bc4da9fee47ee..42fad803bfa68 100644 --- a/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs +++ b/src/test/compile-fail-fulldeps/proc-macro/derive-bad.rs @@ -16,7 +16,7 @@ extern crate derive_bad; #[derive( A )] -//~^^ ERROR: proc_macro_derive produced unparseable tokens +//~^^ ERROR: proc-macro derive produced unparseable tokens struct A; fn main() {} diff --git a/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs b/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs index 107273d012dd6..c483c048b418f 100644 --- a/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs +++ b/src/test/compile-fail-fulldeps/proc-macro/load-panic.rs @@ -14,7 +14,7 @@ extern crate derive_panic; #[derive(A)] -//~^ ERROR: proc_macro_derive attribute panicked +//~^ ERROR: proc-macro derive panicked //~| HELP: message: nope! struct Foo; diff --git a/src/test/compile-fail/deriving-meta-unknown-trait.rs b/src/test/compile-fail/deriving-meta-unknown-trait.rs index 596cc1e7d5816..d388ece084417 100644 --- a/src/test/compile-fail/deriving-meta-unknown-trait.rs +++ b/src/test/compile-fail/deriving-meta-unknown-trait.rs @@ -11,7 +11,7 @@ // ignore-tidy-linelength #[derive(Eqr)] -//~^ ERROR `#[derive]` for custom traits is not stable enough for use. It is deprecated and will be removed in v1.15 (see issue #29644) +//~^ ERROR cannot find derive macro `Eqr` in this scope struct Foo; pub fn main() {} diff --git a/src/test/compile-fail/deriving-primitive.rs b/src/test/compile-fail/deriving-primitive.rs index be822a173ab58..97a39a46c19a8 100644 --- a/src/test/compile-fail/deriving-primitive.rs +++ b/src/test/compile-fail/deriving-primitive.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[derive(FromPrimitive)] //~ERROR `#[derive]` for custom traits is not stable +#[derive(FromPrimitive)] //~ERROR cannot find derive macro `FromPrimitive` in this scope enum Foo {} fn main() {} diff --git a/src/test/compile-fail/macro-error.rs b/src/test/compile-fail/macro-error.rs index 4a6dbf014a1ca..f467ba3b1e195 100644 --- a/src/test/compile-fail/macro-error.rs +++ b/src/test/compile-fail/macro-error.rs @@ -16,5 +16,5 @@ fn main() { foo!(0); // Check that we report errors at macro definition, not expansion. 
let _: cfg!(foo) = (); //~ ERROR non-type macro in type position - derive!(); //~ ERROR `derive` can only be used in attributes + derive!(); //~ ERROR macro undefined: 'derive!' } diff --git a/src/test/compile-fail/macros-nonfatal-errors.rs b/src/test/compile-fail/macros-nonfatal-errors.rs index 723e936212a5b..3f710af8ac9a8 100644 --- a/src/test/compile-fail/macros-nonfatal-errors.rs +++ b/src/test/compile-fail/macros-nonfatal-errors.rs @@ -14,9 +14,11 @@ #![feature(asm)] #![feature(trace_macros, concat_idents)] -#[derive(Default, //~ ERROR - Zero)] //~ ERROR -enum CantDeriveThose {} +#[derive(Zero)] //~ ERROR +struct CantDeriveThis; + +#[derive(Default)] //~ ERROR +enum OrDeriveThis {} fn main() { doesnt_exist!(); //~ ERROR diff --git a/src/test/ui/custom-derive/issue-36935.stderr b/src/test/ui/custom-derive/issue-36935.stderr index ad1382cbc8e4b..9a5e2de14e3b0 100644 --- a/src/test/ui/custom-derive/issue-36935.stderr +++ b/src/test/ui/custom-derive/issue-36935.stderr @@ -1,4 +1,4 @@ -error: proc_macro_derive attribute panicked +error: proc-macro derive panicked --> $DIR/issue-36935.rs:17:15 | 17 | #[derive(Foo, Bar)] From 395f23c9f7c7e1107dce5a05e71b3a9480d4e331 Mon Sep 17 00:00:00 2001 From: Nick Cameron Date: Thu, 2 Feb 2017 16:23:27 +1300 Subject: [PATCH 05/18] save-analysis: be more paranoid about generated paths fixes https://github.com/rust-lang-nursery/rls/issues/160 --- src/librustc_save_analysis/lib.rs | 3 +++ src/libsyntax/parse/parser.rs | 1 + 2 files changed, 4 insertions(+) diff --git a/src/librustc_save_analysis/lib.rs b/src/librustc_save_analysis/lib.rs index 8d0cdd1678c73..ebb33a12c8703 100644 --- a/src/librustc_save_analysis/lib.rs +++ b/src/librustc_save_analysis/lib.rs @@ -430,6 +430,9 @@ impl<'l, 'tcx: 'l> SaveContext<'l, 'tcx> { -> Option { self.lookup_ref_id(trait_ref.ref_id).and_then(|def_id| { let span = trait_ref.path.span; + if generated_code(span) { + return None; + } let sub_span = self.span_utils.sub_span_for_type_name(span).or(Some(span)); filter!(self.span_utils, sub_span, span, None); Some(TypeRefData { diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 3480db8ec3b7d..d2bc7bf4f4810 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -1693,6 +1693,7 @@ impl<'a> Parser<'a> { } // Assemble the span. + // FIXME(#39450) This is bogus if part of the path is macro generated. let span = mk_sp(lo, self.prev_span.hi); // Assemble the result. From 89f9767356b24623e6cade93dad3f59ec3288bb7 Mon Sep 17 00:00:00 2001 From: Austin Bonander Date: Wed, 1 Feb 2017 20:11:10 -0800 Subject: [PATCH 06/18] Change tracking issue for `proc_macro` feature to #38356 --- src/libsyntax/feature_gate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libsyntax/feature_gate.rs b/src/libsyntax/feature_gate.rs index 112211851ec05..8eafa5f93d8f3 100644 --- a/src/libsyntax/feature_gate.rs +++ b/src/libsyntax/feature_gate.rs @@ -318,7 +318,7 @@ declare_features! ( (active, abi_unadjusted, "1.16.0", None), // Macros 1.1 - (active, proc_macro, "1.16.0", Some(35900)), + (active, proc_macro, "1.16.0", Some(38356)), // Allows attributes on struct literal fields. 
(active, struct_field_attributes, "1.16.0", Some(38814)), From 823e185a40baf593a9e0b454a29751f0953ab2a4 Mon Sep 17 00:00:00 2001 From: Son Date: Thu, 2 Feb 2017 22:05:49 +1100 Subject: [PATCH 07/18] Suggest only if resolution was previously resolved --- src/librustc_resolve/resolve_imports.rs | 14 +++++++++++++- .../issue-38054-do-not-show-unresolved-names.rs | 15 +++++++++++++++ ...ssue-38054-do-not-show-unresolved-names.stderr | 14 ++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.rs create mode 100644 src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.stderr diff --git a/src/librustc_resolve/resolve_imports.rs b/src/librustc_resolve/resolve_imports.rs index 65cdeb9253d89..dbc8bca548b76 100644 --- a/src/librustc_resolve/resolve_imports.rs +++ b/src/librustc_resolve/resolve_imports.rs @@ -639,7 +639,19 @@ impl<'a, 'b:'a> ImportResolver<'a, 'b> { let names = resolutions.iter().filter_map(|(&(ref i, _), resolution)| { if *i == ident { return None; } // Never suggest the same name match *resolution.borrow() { - NameResolution { binding: Some(_), .. } => Some(&i.name), + NameResolution { binding: Some(name_binding), .. } => { + match name_binding.kind { + NameBindingKind::Import { binding, .. } => { + match binding.kind { + // Never suggest the name that has binding error + // i.e. the name that cannot be previously resolved + NameBindingKind::Def(Def::Err) => return None, + _ => Some(&i.name), + } + }, + _ => Some(&i.name), + } + }, NameResolution { single_imports: SingleImports::None, .. } => None, _ => Some(&i.name), } diff --git a/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.rs b/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.rs new file mode 100644 index 0000000000000..1938d33e53030 --- /dev/null +++ b/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.rs @@ -0,0 +1,15 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use Foo; + +use Foo1; + +fn main() {} diff --git a/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.stderr b/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.stderr new file mode 100644 index 0000000000000..325f55e686c62 --- /dev/null +++ b/src/test/ui/did_you_mean/issue-38054-do-not-show-unresolved-names.stderr @@ -0,0 +1,14 @@ +error[E0432]: unresolved import `Foo` + --> $DIR/issue-38054-do-not-show-unresolved-names.rs:11:5 + | +11 | use Foo; + | ^^^ no `Foo` in the root + +error[E0432]: unresolved import `Foo1` + --> $DIR/issue-38054-do-not-show-unresolved-names.rs:13:5 + | +13 | use Foo1; + | ^^^^ no `Foo1` in the root + +error: aborting due to 2 previous errors + From a0efdf34417b5564a7474e8c2175b9e643640b8f Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Thu, 2 Feb 2017 22:27:15 +0100 Subject: [PATCH 08/18] Don't check for sudo environment if vendored sources are already configured --- src/bootstrap/bootstrap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 85e8dbce1a955..958ffd11008a3 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -438,7 +438,7 @@ def main(): rb.use_vendored_sources = '\nvendor = true' in rb.config_toml or \ 'CFG_ENABLE_VENDOR' in rb.config_mk - if 'SUDO_USER' in os.environ: + if 'SUDO_USER' in os.environ and not rb.use_vendored_sources: if os.environ['USER'] != os.environ['SUDO_USER']: rb.use_vendored_sources = True print('info: looks like you are running this command under `sudo`') From 8e793eb3c64e289d4fa5850bf28d10fe5b3e062a Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Thu, 2 Feb 2017 22:28:00 +0100 Subject: [PATCH 09/18] Guard against USER not existing in the environment --- src/bootstrap/bootstrap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 958ffd11008a3..3869b286102c9 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -439,7 +439,7 @@ def main(): 'CFG_ENABLE_VENDOR' in rb.config_mk if 'SUDO_USER' in os.environ and not rb.use_vendored_sources: - if os.environ['USER'] != os.environ['SUDO_USER']: + if os.environ.get('USER') != os.environ['SUDO_USER']: rb.use_vendored_sources = True print('info: looks like you are running this command under `sudo`') print(' and so in order to preserve your $HOME this will now') From 51e5cb525db60435ea32c5a77c17ac75fe580f64 Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Thu, 2 Feb 2017 22:28:26 +0100 Subject: [PATCH 10/18] Fix typo in bootstrap.py info message --- src/bootstrap/bootstrap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 3869b286102c9..bc8341102421b 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -445,7 +445,7 @@ def main(): print(' and so in order to preserve your $HOME this will now') print(' use vendored sources by default. Note that if this') print(' does not work you should run a normal build first') - print(' before running a command like `sudo make intall`') + print(' before running a command like `sudo make install`') if rb.use_vendored_sources: if not os.path.exists('.cargo'): From 681bc5c66cf6f99f7a95ebd5ed38cdd3fb3c4245 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 2 Feb 2017 19:24:20 -0800 Subject: [PATCH 11/18] rustbuild: Add x.py to source tarballs We should be sure to add our build system entry point! 
Closes #39476 --- src/bootstrap/dist.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 5fac142f777ff..9327cc0cf7faf 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -381,7 +381,8 @@ pub fn rust_src(build: &Build) { "README.md", "RELEASES.md", "configure", - "Makefile.in" + "Makefile.in", + "x.py", ]; let src_dirs = [ "man", From ef9ae8581ff17dd88eb0260076bfe80d8d9c4aba Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Fri, 3 Feb 2017 06:08:16 -0500 Subject: [PATCH 12/18] make dirty process O(dirty) The old algorithm was O(graph) --- src/librustc_incremental/persist/data.rs | 17 +++-- src/librustc_incremental/persist/load.rs | 92 ++++++++++++++---------- src/librustc_incremental/persist/save.rs | 7 +- 3 files changed, 66 insertions(+), 50 deletions(-) diff --git a/src/librustc_incremental/persist/data.rs b/src/librustc_incremental/persist/data.rs index f0e4f4f99ef08..60f24b71de245 100644 --- a/src/librustc_incremental/persist/data.rs +++ b/src/librustc_incremental/persist/data.rs @@ -21,7 +21,7 @@ use super::directory::DefPathIndex; /// Data for use when recompiling the **current crate**. #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct SerializedDepGraph { - pub edges: Vec, + pub edges: Vec, /// These are hashes of two things: /// - the HIR nodes in this crate @@ -45,14 +45,13 @@ pub struct SerializedDepGraph { pub hashes: Vec, } -/// Represents a "reduced" dependency edge. Unlike the full dep-graph, -/// the dep-graph we serialize contains only edges `S -> T` where the -/// source `S` is something hashable (a HIR node or foreign metadata) -/// and the target `T` is something significant, like a work-product. -/// Normally, significant nodes are only those that have saved data on -/// disk, but in unit-testing the set of significant nodes can be -/// increased. -pub type SerializedEdge = (DepNode, DepNode); +/// Represents a set of "reduced" dependency edge. We group the +/// outgoing edges from a single source together. +#[derive(Debug, RustcEncodable, RustcDecodable)] +pub struct SerializedEdgeSet { + pub source: DepNode, + pub targets: Vec> +} #[derive(Debug, RustcEncodable, RustcDecodable)] pub struct SerializedHash { diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs index e9a59fd762972..b371ab6aa31bc 100644 --- a/src/librustc_incremental/persist/load.rs +++ b/src/librustc_incremental/persist/load.rs @@ -152,6 +152,11 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let directory = DefIdDirectory::decode(&mut dep_graph_decoder)?; let serialized_dep_graph = SerializedDepGraph::decode(&mut dep_graph_decoder)?; + let edge_map: FxHashMap<_, _> = serialized_dep_graph.edges + .into_iter() + .map(|s| (s.source, s.targets)) + .collect(); + // Retrace the paths in the directory to find their current location (if any). let retraced = directory.retrace(tcx); @@ -166,46 +171,48 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, incremental_hashes_map, &serialized_dep_graph.hashes, &retraced); - let dirty_raw_nodes = transitive_dirty_nodes(&serialized_dep_graph.edges, dirty_raw_nodes); + let dirty_raw_nodes = transitive_dirty_nodes(&edge_map, dirty_raw_nodes); // Recreate the edges in the graph that are still clean. 
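Two standalone sketches of what this patch introduces, using plain `std` collections and `u32` node ids rather than the real `DepNode` types, and dropping the blame tracking for brevity: first, grouping a flat `(source, target)` edge list by source, as the new `SerializedEdgeSet` representation does; second, the worklist propagation that makes dirtying O(dirty region) instead of O(graph).

    use std::collections::{HashMap, HashSet};

    // Group a flat edge list by source node.
    fn group_edges(flat: &[(u32, u32)]) -> HashMap<u32, Vec<u32>> {
        let mut map: HashMap<u32, Vec<u32>> = HashMap::new();
        for &(source, target) in flat {
            map.entry(source).or_insert_with(Vec::new).push(target);
        }
        map
    }

    // Each node is pushed at most once (guarded by the `insert` check),
    // so the cost is proportional to the dirty region, not the graph.
    fn transitive_dirty(edges: &HashMap<u32, Vec<u32>>, initial: &[u32]) -> HashSet<u32> {
        let mut dirty: HashSet<u32> = initial.iter().cloned().collect();
        let mut stack: Vec<u32> = initial.to_vec();
        while let Some(source) = stack.pop() {
            if let Some(targets) = edges.get(&source) {
                for &target in targets {
                    if dirty.insert(target) {
                        stack.push(target);
                    }
                }
            }
        }
        dirty
    }

    fn main() {
        let edges = group_edges(&[(1, 2), (1, 3), (3, 4), (5, 6)]);
        let expected: HashSet<u32> = [1, 2, 3, 4].iter().cloned().collect();
        assert_eq!(transitive_dirty(&edges, &[1]), expected); // 5 -> 6 stays clean
    }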
let mut clean_work_products = FxHashSet(); let mut dirty_work_products = FxHashSet(); // incomplete; just used to suppress debug output - for edge in &serialized_dep_graph.edges { - // If the target is dirty, skip the edge. If this is an edge - // that targets a work-product, we can print the blame - // information now. - if let Some(blame) = dirty_raw_nodes.get(&edge.1) { - if let DepNode::WorkProduct(ref wp) = edge.1 { - if tcx.sess.opts.debugging_opts.incremental_info { - if dirty_work_products.insert(wp.clone()) { - // It'd be nice to pretty-print these paths better than just - // using the `Debug` impls, but wev. - println!("incremental: module {:?} is dirty because {:?} \ - changed or was removed", - wp, - blame.map_def(|&index| { - Some(directory.def_path_string(tcx, index)) - }).unwrap()); + for (source, targets) in &edge_map { + for target in targets { + // If the target is dirty, skip the edge. If this is an edge + // that targets a work-product, we can print the blame + // information now. + if let Some(blame) = dirty_raw_nodes.get(target) { + if let DepNode::WorkProduct(ref wp) = *target { + if tcx.sess.opts.debugging_opts.incremental_info { + if dirty_work_products.insert(wp.clone()) { + // It'd be nice to pretty-print these paths better than just + // using the `Debug` impls, but wev. + println!("incremental: module {:?} is dirty because {:?} \ + changed or was removed", + wp, + blame.map_def(|&index| { + Some(directory.def_path_string(tcx, index)) + }).unwrap()); + } } } + continue; } - continue; - } - // If the source is dirty, the target will be dirty. - assert!(!dirty_raw_nodes.contains_key(&edge.0)); - - // Retrace the source -> target edges to def-ids and then - // create an edge in the graph. Retracing may yield none if - // some of the data happens to have been removed; this ought - // to be impossible unless it is dirty, so we can unwrap. - let source_node = retraced.map(&edge.0).unwrap(); - let target_node = retraced.map(&edge.1).unwrap(); - let _task = tcx.dep_graph.in_task(target_node); - tcx.dep_graph.read(source_node); - if let DepNode::WorkProduct(ref wp) = edge.1 { - clean_work_products.insert(wp.clone()); + // If the source is dirty, the target will be dirty. + assert!(!dirty_raw_nodes.contains_key(source)); + + // Retrace the source -> target edges to def-ids and then + // create an edge in the graph. Retracing may yield none if + // some of the data happens to have been removed; this ought + // to be impossible unless it is dirty, so we can unwrap. + let source_node = retraced.map(source).unwrap(); + let target_node = retraced.map(target).unwrap(); + let _task = tcx.dep_graph.in_task(target_node); + tcx.dep_graph.read(source_node); + if let DepNode::WorkProduct(ref wp) = *target { + clean_work_products.insert(wp.clone()); + } } } @@ -268,16 +275,23 @@ fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, dirty_nodes } -fn transitive_dirty_nodes(edges: &[SerializedEdge], +fn transitive_dirty_nodes(edge_map: &FxHashMap, Vec>>, mut dirty_nodes: DirtyNodes) -> DirtyNodes { - let mut len = 0; - while len != dirty_nodes.len() { - len = dirty_nodes.len(); - for edge in edges { - if let Some(n) = dirty_nodes.get(&edge.0).cloned() { - dirty_nodes.insert(edge.1.clone(), n); + let mut stack: Vec<(DepNode, DepNode)> = vec![]; + stack.extend(dirty_nodes.iter().map(|(s, b)| (s.clone(), b.clone()))); + while let Some((source, blame)) = stack.pop() { + // we know the source is dirty (because of the node `blame`)... 
+ assert!(dirty_nodes.contains_key(&source)); + + // ...so we dirty all the targets (with the same blame) + if let Some(targets) = edge_map.get(&source) { + for target in targets { + if !dirty_nodes.contains_key(target) { + dirty_nodes.insert(target.clone(), blame.clone()); + stack.push((target.clone(), blame.clone())); + } } } } diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index 80f51700d60e4..93600631b8d3e 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -178,7 +178,7 @@ pub fn encode_dep_graph(preds: &Predecessors, // Create a flat list of (Input, WorkProduct) edges for // serialization. - let mut edges = vec![]; + let mut edges = FxHashMap(); for edge in preds.reduced_graph.all_edges() { let source = *preds.reduced_graph.node_data(edge.source()); let target = *preds.reduced_graph.node_data(edge.target()); @@ -194,7 +194,7 @@ pub fn encode_dep_graph(preds: &Predecessors, debug!("serialize edge: {:?} -> {:?}", source, target); let source = builder.map(source); let target = builder.map(target); - edges.push((source, target)); + edges.entry(source).or_insert(vec![]).push(target); } if tcx.sess.opts.debugging_opts.incremental_dump_hash { @@ -204,6 +204,9 @@ pub fn encode_dep_graph(preds: &Predecessors, } // Create the serialized dep-graph. + let edges = edges.into_iter() + .map(|(k, v)| SerializedEdgeSet { source: k, targets: v }) + .collect(); let graph = SerializedDepGraph { edges: edges, hashes: preds.hashes From afbf6c82464b6cf0e0b77d3f8458ae31ac38fd6e Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Fri, 3 Feb 2017 06:09:47 -0500 Subject: [PATCH 13/18] s/in_index/input_index/ --- .../persist/preds/compress/dag_id.rs | 4 ++-- src/librustc_incremental/persist/preds/compress/mod.rs | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/librustc_incremental/persist/preds/compress/dag_id.rs b/src/librustc_incremental/persist/preds/compress/dag_id.rs index 5ebabc98fda94..c79930bfae513 100644 --- a/src/librustc_incremental/persist/preds/compress/dag_id.rs +++ b/src/librustc_incremental/persist/preds/compress/dag_id.rs @@ -7,11 +7,11 @@ pub struct DagId { } impl DagId { - pub fn from_in_index(n: NodeIndex) -> Self { + pub fn from_input_index(n: NodeIndex) -> Self { DagId { index: n.0 as u32 } } - pub fn as_in_index(&self) -> NodeIndex { + pub fn as_input_index(&self) -> NodeIndex { NodeIndex(self.index as usize) } } diff --git a/src/librustc_incremental/persist/preds/compress/mod.rs b/src/librustc_incremental/persist/preds/compress/mod.rs index ecd6d72e22d50..974a2221a4575 100644 --- a/src/librustc_incremental/persist/preds/compress/mod.rs +++ b/src/librustc_incremental/persist/preds/compress/mod.rs @@ -89,7 +89,7 @@ impl<'q, N, I, O> GraphReduce<'q, N, I, O> // correspond to the indices from the input graph for i in 0..in_graph.len_nodes() { let k = unify.new_key(()); - assert!(k == DagId::from_in_index(NodeIndex(i))); + assert!(k == DagId::from_input_index(NodeIndex(i))); } GraphReduce { in_graph, unify, is_input, is_output } @@ -105,8 +105,8 @@ impl<'q, N, I, O> GraphReduce<'q, N, I, O> } fn mark_cycle(&mut self, in_node1: NodeIndex, in_node2: NodeIndex) { - let dag_id1 = DagId::from_in_index(in_node1); - let dag_id2 = DagId::from_in_index(in_node2); + let dag_id1 = DagId::from_input_index(in_node1); + let dag_id2 = DagId::from_input_index(in_node2); self.unify.union(dag_id1, dag_id2); } @@ -114,8 +114,8 @@ impl<'q, N, I, O> GraphReduce<'q, N, I, O> /// be 
a no-op unless `in_node` participates in a cycle, in which /// case a distinct node *may* be returned. fn cycle_head(&mut self, in_node: NodeIndex) -> NodeIndex { - let i = DagId::from_in_index(in_node); - self.unify.find(i).as_in_index() + let i = DagId::from_input_index(in_node); + self.unify.find(i).as_input_index() } #[cfg(test)] From 3c020df3e905f6486fb793a0c41f3ac63f3aa255 Mon Sep 17 00:00:00 2001 From: Son Date: Sat, 4 Feb 2017 00:16:56 +1100 Subject: [PATCH 14/18] tiny doc wording change --- src/libstd/sync/condvar.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libstd/sync/condvar.rs b/src/libstd/sync/condvar.rs index 8ab30c51b282e..5c85c4879a839 100644 --- a/src/libstd/sync/condvar.rs +++ b/src/libstd/sync/condvar.rs @@ -36,7 +36,7 @@ impl WaitTimeoutResult { /// consumes no CPU time while waiting for an event to occur. Condition /// variables are typically associated with a boolean predicate (a condition) /// and a mutex. The predicate is always verified inside of the mutex before -/// determining that thread must block. +/// determining that a thread must block. /// /// Functions in this module will block the current **thread** of execution and /// are bindings to system-provided condition variables where possible. Note From 7abab8aee09cdc8d23ec8fae5b5ec7675486aef0 Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Fri, 3 Feb 2017 12:42:18 -0500 Subject: [PATCH 15/18] add a comment about optimality that somehow got removed --- .../persist/preds/compress/construct.rs | 12 ++ .../persist/preds/compress/test.rs | 122 +++++++++--------- 2 files changed, 76 insertions(+), 58 deletions(-) diff --git a/src/librustc_incremental/persist/preds/compress/construct.rs b/src/librustc_incremental/persist/preds/compress/construct.rs index b0dcb63758e8c..965c773597ede 100644 --- a/src/librustc_incremental/persist/preds/compress/construct.rs +++ b/src/librustc_incremental/persist/preds/compress/construct.rs @@ -78,6 +78,18 @@ pub(super) fn construct_graph<'g, N, I, O>(r: &mut GraphReduce<'g, N, I, O>, dag // // Now if we were to remove Y, we would have a total of 8 edges: both WP0 and WP1 // depend on INPUT0...INPUT3. As it is, we have 6 edges. + // + // NB: The current rules are not optimal. For example, given this + // input graph: + // + // OUT0 -rf-> X + // OUT1 -rf-> X + // X -rf -> INPUT0 + // + // we will preserve X because it has two "consumers" (OUT0 and + // OUT1). We could as easily skip it, but we'd have to tally up + // the number of input nodes that it (transitively) reaches, and I + // was too lazy to do so. This is the unit test `suboptimal`. 
let mut retain_map = FxHashMap(); let mut new_graph = Graph::new(); diff --git a/src/librustc_incremental/persist/preds/compress/test.rs b/src/librustc_incremental/persist/preds/compress/test.rs index 6ac834ee53fbc..be91677f4d14b 100644 --- a/src/librustc_incremental/persist/preds/compress/test.rs +++ b/src/librustc_incremental/persist/preds/compress/test.rs @@ -151,62 +151,69 @@ fn test3() { ]); } -//#[test] -//fn test_cached_dfs_cyclic() { -// -// // 0 1 <---- 2 3 -// // ^ | ^ ^ -// // | v | | -// // 4 ----> 5 ----> 6 ----> 7 -// // ^ ^ ^ ^ -// // | | | | -// // 8 9 10 11 -// -// -// let mut g: Graph = Graph::new(); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(false); -// g.add_node(true); -// g.add_node(true); -// g.add_node(true); -// g.add_node(true); -// -// g.add_edge(NodeIndex( 4), NodeIndex(0), ()); -// g.add_edge(NodeIndex( 8), NodeIndex(4), ()); -// g.add_edge(NodeIndex( 4), NodeIndex(5), ()); -// g.add_edge(NodeIndex( 1), NodeIndex(5), ()); -// g.add_edge(NodeIndex( 9), NodeIndex(5), ()); -// g.add_edge(NodeIndex( 5), NodeIndex(6), ()); -// g.add_edge(NodeIndex( 6), NodeIndex(2), ()); -// g.add_edge(NodeIndex( 2), NodeIndex(1), ()); -// g.add_edge(NodeIndex(10), NodeIndex(6), ()); -// g.add_edge(NodeIndex( 6), NodeIndex(7), ()); -// g.add_edge(NodeIndex(11), NodeIndex(7), ()); -// g.add_edge(NodeIndex( 7), NodeIndex(3), ()); -// -// let mut ws1 = DfsWorkspace::new(g.len_nodes()); -// let mut ws2 = DfsWorkspace::new(g.len_nodes()); -// let mut visit_counts: Vec<_> = g.all_nodes().iter().map(|_| 0u32).collect(); -// let mut cache: Vec>> = g.all_nodes().iter().map(|_| None).collect(); -// -// fn is_root(x: &bool) -> bool { *x } -// -// for _ in 0 .. CACHING_THRESHOLD + 1 { -// find_roots(&g, 2, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); -// ws1.output.nodes.sort(); -// assert_eq!(ws1.output.nodes, vec![8, 9, 10]); -// -// find_roots(&g, 3, &mut visit_counts, &mut cache[..], is_root, &mut ws1, Some(&mut ws2)); -// ws1.output.nodes.sort(); -// assert_eq!(ws1.output.nodes, vec![8, 9, 10, 11]); -// } -//} +#[test] +fn test_cached_dfs_cyclic() { + + // 0 1 <---- 2 3 + // ^ | ^ ^ + // | v | | + // 4 ----> 5 ----> 6 ----> 7 + // ^ ^ ^ ^ + // | | | | + // 8 9 10 11 + + let (graph, _nodes) = graph! { + // edges from above diagram, in columns, top-to-bottom: + n4 -> n0, + n8 -> n4, + n4 -> n5, + n1 -> n5, + n9 -> n5, + n2 -> n1, + n5 -> n6, + n6 -> n2, + n10 -> n6, + n6 -> n7, + n7 -> n3, + n11 -> n7, + }; + + // 0 1 2 3 + // ^ ^ / ^ + // | |/ | + // 4 ----> 5 --------------+ + // ^ ^ \ | + // | | \ | + // 8 9 10 11 + + reduce(&graph, &["n8", "n9", "n10", "n11"], &["n0", "n1", "n2", "n3"], &[ + "n10 -> n5", + "n11 -> n3", + "n4 -> n0", + "n4 -> n5", + "n5 -> n1", + "n5 -> n2", + "n5 -> n3", + "n8 -> n4", + "n9 -> n5" + ]); +} + +/// Demonstrates the case where we don't reduce as much as we could. +#[test] +fn suboptimal() { + let (graph, _nodes) = graph! 
{ + INPUT0 -> X, + X -> OUTPUT0, + X -> OUTPUT1, + }; + + reduce(&graph, &["INPUT0"], &["OUTPUT0", "OUTPUT1"], &[ + "INPUT0 -> X", + "X -> OUTPUT0", + "X -> OUTPUT1" + ]); +} #[test] fn test_cycle_output() { @@ -229,7 +236,7 @@ fn test_cycle_output() { D -> C1, }; - // [A] -> [C0] <-> [D] + // [A] -> [C0] --> [D] // +----> [E] // ^ // [B] -------------+ @@ -238,6 +245,5 @@ fn test_cycle_output() { "B -> E", "C0 -> D", "C0 -> E", - "D -> C0" ]); } From 2a8ee8c80406c95757f7e27ec81b303e241756f3 Mon Sep 17 00:00:00 2001 From: Guillaume Gomez Date: Fri, 3 Feb 2017 22:38:44 +0100 Subject: [PATCH 16/18] Add missing urls in HashMap --- src/libstd/collections/hash/map.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs index a314717a8772b..8058972e75093 100644 --- a/src/libstd/collections/hash/map.rs +++ b/src/libstd/collections/hash/map.rs @@ -198,9 +198,9 @@ impl DefaultResizePolicy { /// attacks such as HashDoS. /// /// The hashing algorithm can be replaced on a per-`HashMap` basis using the -/// `HashMap::default`, `HashMap::with_hasher`, and -/// `HashMap::with_capacity_and_hasher` methods. Many alternative algorithms -/// are available on crates.io, such as the `fnv` crate. +/// [`HashMap::default`], [`HashMap::with_hasher`], and +/// [`HashMap::with_capacity_and_hasher`] methods. Many alternative algorithms +/// are available on crates.io, such as the [`fnv`] crate. /// /// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although /// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. @@ -302,6 +302,10 @@ impl DefaultResizePolicy { /// [`PartialEq`]: ../../std/cmp/trait.PartialEq.html /// [`RefCell`]: ../../std/cell/struct.RefCell.html /// [`Cell`]: ../../std/cell/struct.Cell.html +/// [`HashMap::default`]: #method.default +/// [`HashMap::with_hasher`]: #method.with_hasher +/// [`HashMap::with_capacity_and_hasher`]: #method.with_capacity_and_hasher +/// [`fnv`]: https://crates.io/crates/fnv /// /// ``` /// use std::collections::HashMap; @@ -680,7 +684,9 @@ impl HashMap /// /// # Panics /// - /// Panics if the new allocation size overflows `usize`. + /// Panics if the new allocation size overflows [`usize`]. + /// + /// [`usize`]: ../../std/primitive.usize.html /// /// # Examples /// @@ -1141,13 +1147,14 @@ impl HashMap /// Inserts a key-value pair into the map. /// - /// If the map did not have this key present, `None` is returned. + /// If the map did not have this key present, [`None`] is returned. /// /// If the map did have this key present, the value is updated, and the old /// value is returned. The key is not updated, though; this matters for /// types that can be `==` without being identical. See the [module-level /// documentation] for more. /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None /// [module-level documentation]: index.html#insert-and-complex-keys /// /// # Examples From d650cf5c383aeed222e835009c28901c236820e1 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Sat, 4 Feb 2017 01:12:38 +0000 Subject: [PATCH 17/18] Update relnotes for 1.15.1 I already checked this into stable, but it needs to be on master/beta too. 
--- RELEASES.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/RELEASES.md b/RELEASES.md index 99d6b7709ede9..f26c0b6b61161 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,3 +1,11 @@ +Version 1.15.1 (2017-02-07) +=========================== + +* [Fix IntoIter::as_mut_slice's signature][39466] + +[39466]: https://github.com/rust-lang/rust/pull/39466 + + Version 1.15.0 (2017-02-02) =========================== From b3096e25c0a574cb20706adabe75667a215a86b9 Mon Sep 17 00:00:00 2001 From: Niko Matsakis Date: Sat, 4 Feb 2017 06:09:19 -0500 Subject: [PATCH 18/18] pacify the mercilous tidy, improve cycle unit test --- .../persist/preds/compress/classify/mod.rs | 14 +++++++++- .../persist/preds/compress/classify/test.rs | 27 ++++++++++++++----- .../persist/preds/compress/construct.rs | 10 +++++++ .../persist/preds/compress/dag_id.rs | 10 +++++++ .../persist/preds/compress/test.rs | 10 +++++++ .../persist/preds/compress/test_macro.rs | 10 +++++++ src/librustc_incremental/persist/save.rs | 3 ++- 7 files changed, 76 insertions(+), 8 deletions(-) diff --git a/src/librustc_incremental/persist/preds/compress/classify/mod.rs b/src/librustc_incremental/persist/preds/compress/classify/mod.rs index f75063f8b9c9a..559bdbdd1e2e5 100644 --- a/src/librustc_incremental/persist/preds/compress/classify/mod.rs +++ b/src/librustc_incremental/persist/preds/compress/classify/mod.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + //! First phase. Detect cycles and cross-edges. use super::*; @@ -122,7 +132,9 @@ impl<'a, 'g, N, I, O> Classify<'a, 'g, N, I, O> assert!(self.stack[stack_index] == child); for &n in &self.stack[stack_index..] { - debug!("cycle `{:?}` and `{:?}`", self.r.in_graph.node_data(n), self.r.in_graph.node_data(parent)); + debug!("cycle `{:?}` and `{:?}`", + self.r.in_graph.node_data(n), + self.r.in_graph.node_data(parent)); self.r.mark_cycle(n, parent); } } diff --git a/src/librustc_incremental/persist/preds/compress/classify/test.rs b/src/librustc_incremental/persist/preds/compress/classify/test.rs index 22067a10343d3..ca26f714a2a74 100644 --- a/src/librustc_incremental/persist/preds/compress/classify/test.rs +++ b/src/librustc_incremental/persist/preds/compress/classify/test.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use super::*; #[test] @@ -45,12 +55,17 @@ fn edge_order1() { let mut reduce = GraphReduce::new(&graph, |n| inputs.contains(n), |n| outputs.contains(n)); Classify::new(&mut reduce).walk(); - assert!(reduce.in_cycle(nodes("B"), nodes("C"))); - - assert!(!reduce.in_cycle(nodes("IN"), nodes("A"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("B"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("C"))); - assert!(!reduce.in_cycle(nodes("IN"), nodes("OUT"))); + // A, B, and C are mutually in a cycle, but IN/OUT are not participating. 
+ let names = ["A", "B", "C", "IN", "OUT"]; + let cycle_names = ["A", "B", "C"]; + for &i in &names { + for &j in names.iter().filter(|&&j| j != i) { + let in_cycle = cycle_names.contains(&i) && cycle_names.contains(&j); + assert_eq!(reduce.in_cycle(nodes(i), nodes(j)), in_cycle, + "cycle status for nodes {} and {} is incorrect", + i, j); + } + } } /// Same as `edge_order1` but in reverse order so as to detect a failure diff --git a/src/librustc_incremental/persist/preds/compress/construct.rs b/src/librustc_incremental/persist/preds/compress/construct.rs index 965c773597ede..394be74f7835f 100644 --- a/src/librustc_incremental/persist/preds/compress/construct.rs +++ b/src/librustc_incremental/persist/preds/compress/construct.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + //! Second phase. Construct new graph. The previous phase has //! converted the input graph into a DAG by detecting and unifying //! cycles. It provides us with the following (which is a diff --git a/src/librustc_incremental/persist/preds/compress/dag_id.rs b/src/librustc_incremental/persist/preds/compress/dag_id.rs index c79930bfae513..a286862e9551f 100644 --- a/src/librustc_incremental/persist/preds/compress/dag_id.rs +++ b/src/librustc_incremental/persist/preds/compress/dag_id.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use rustc_data_structures::graph::NodeIndex; use rustc_data_structures::unify::UnifyKey; diff --git a/src/librustc_incremental/persist/preds/compress/test.rs b/src/librustc_incremental/persist/preds/compress/test.rs index be91677f4d14b..1c5130845a855 100644 --- a/src/librustc_incremental/persist/preds/compress/test.rs +++ b/src/librustc_incremental/persist/preds/compress/test.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + use super::*; fn reduce(graph: &Graph<&'static str, ()>, diff --git a/src/librustc_incremental/persist/preds/compress/test_macro.rs b/src/librustc_incremental/persist/preds/compress/test_macro.rs index 66712c018d06d..31b30d2b2857c 100644 --- a/src/librustc_incremental/persist/preds/compress/test_macro.rs +++ b/src/librustc_incremental/persist/preds/compress/test_macro.rs @@ -1,3 +1,13 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + macro_rules! 
graph { ($( $source:ident -> $target:ident, )*) => { { diff --git a/src/librustc_incremental/persist/save.rs b/src/librustc_incremental/persist/save.rs index 93600631b8d3e..34bb125ef3f45 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -290,7 +290,8 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, .map(|index| preds.reduced_graph.node_data(index)) .filter(|dep_node| HashContext::is_hashable(dep_node)) .map(|dep_node| { - let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))).unwrap(); + let hash_dep_node = dep_node.map_def(|&def_id| Some(def_id_hash(def_id))) + .unwrap(); let hash = preds.hashes[dep_node]; (hash_dep_node, hash) })
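
The core of PATCH 12 above ("make dirty process O(dirty)") is the change in `transitive_dirty_nodes` from repeated full scans of the edge list to a single worklist pass over the dirty subgraph. The standalone sketch below restates that strategy under simplifying assumptions and is an illustration of the idea, not the compiler's actual code: plain `String` node names stand in for `DepNode<DefPathIndex>`, std's `HashMap` stands in for `FxHashMap`, and blame tracking is reduced to one blamed node per dirty node.

    use std::collections::HashMap;

    /// Propagate dirtiness from the initially dirty nodes to everything
    /// reachable from them, visiting each newly dirtied node at most once.
    /// The map records, for each dirty node, the node to "blame" for it.
    fn transitive_dirty(edges: &HashMap<String, Vec<String>>,
                        mut dirty: HashMap<String, String>)
                        -> HashMap<String, String> {
        // Seed the worklist with the initially dirty nodes and their blames.
        let mut stack: Vec<(String, String)> =
            dirty.iter().map(|(n, b)| (n.clone(), b.clone())).collect();
        while let Some((source, blame)) = stack.pop() {
            if let Some(targets) = edges.get(&source) {
                for target in targets {
                    // Push a target only the first time it becomes dirty, so
                    // total work is proportional to the dirty subgraph, not
                    // to the whole graph.
                    if !dirty.contains_key(target) {
                        dirty.insert(target.clone(), blame.clone());
                        stack.push((target.clone(), blame.clone()));
                    }
                }
            }
        }
        dirty
    }

    fn main() {
        let mut edges = HashMap::new();
        edges.insert("a".to_string(), vec!["b".to_string()]);
        edges.insert("b".to_string(), vec!["c".to_string()]);
        let mut dirty = HashMap::new();
        dirty.insert("a".to_string(), "a".to_string());
        let dirty = transitive_dirty(&edges, dirty);
        assert!(dirty.contains_key("c")); // dirtiness reached c through b
    }

A stack rather than a queue works here because only reachability matters, not visit order; since each node enters the worklist at most once, the running time is bounded by the size of the dirty set plus its outgoing edges, which is what the commit message means by O(dirty) versus the old O(graph) fixed-point loop.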