diff --git a/mk/crates.mk b/mk/crates.mk index 5ff6d7a89dbe0..a915d07384f3c 100644 --- a/mk/crates.mk +++ b/mk/crates.mk @@ -112,7 +112,7 @@ DEPS_rustc := syntax fmt_macros flate arena serialize getopts rbml \ rustc_const_math syntax_pos rustc_errors DEPS_rustc_back := std syntax flate log libc DEPS_rustc_borrowck := rustc log graphviz syntax syntax_pos rustc_errors rustc_mir -DEPS_rustc_data_structures := std log serialize +DEPS_rustc_data_structures := std log serialize libc DEPS_rustc_driver := arena flate getopts graphviz libc rustc rustc_back rustc_borrowck \ rustc_typeck rustc_mir rustc_resolve log syntax serialize rustc_llvm \ rustc_trans rustc_privacy rustc_lint rustc_plugin \ @@ -137,9 +137,8 @@ DEPS_rustc_save_analysis := rustc log syntax syntax_pos serialize DEPS_rustc_typeck := rustc syntax syntax_pos rustc_platform_intrinsics rustc_const_math \ rustc_const_eval rustc_errors -DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts \ - test rustc_lint rustc_const_eval syntax_pos - +DEPS_rustdoc := rustc rustc_driver native:hoedown serialize getopts test \ + rustc_lint rustc_const_eval syntax_pos rustc_data_structures TOOL_DEPS_compiletest := test getopts log serialize TOOL_DEPS_rustdoc := rustdoc diff --git a/src/doc/book/macros.md b/src/doc/book/macros.md index 9f40829f4233f..78fe07ec1be16 100644 --- a/src/doc/book/macros.md +++ b/src/doc/book/macros.md @@ -662,7 +662,7 @@ Here are some common macros you’ll see in Rust code. This macro causes the current thread to panic. You can give it a message to panic with: -```rust,no_run +```rust,should_panic panic!("oh no!"); ``` @@ -688,7 +688,7 @@ These two macros are used in tests. `assert!` takes a boolean. `assert_eq!` takes two values and checks them for equality. `true` passes, `false` `panic!`s. Like this: -```rust,no_run +```rust,should_panic // A-ok! 
assert!(true); diff --git a/src/libcore/char.rs b/src/libcore/char.rs index a3440fe8aa644..99e99e5ca4ca6 100644 --- a/src/libcore/char.rs +++ b/src/libcore/char.rs @@ -18,6 +18,7 @@ use prelude::v1::*; use char_private::is_printable; +use convert::TryFrom; use mem::transmute; // UTF-8 ranges and tags for encoding characters @@ -123,12 +124,7 @@ pub const MAX: char = '\u{10ffff}'; #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_u32(i: u32) -> Option { - // catch out-of-bounds and surrogates - if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { - None - } else { - Some(unsafe { from_u32_unchecked(i) }) - } + char::try_from(i).ok() } /// Converts a `u32` to a `char`, ignoring validity. @@ -176,6 +172,41 @@ pub unsafe fn from_u32_unchecked(i: u32) -> char { transmute(i) } +#[stable(feature = "char_convert", since = "1.12.0")] +impl From for u32 { + #[inline] + fn from(c: char) -> Self { + c as u32 + } +} + +#[stable(feature = "char_convert", since = "1.12.0")] +impl From for char { + #[inline] + fn from(i: u8) -> Self { + i as char + } +} + +#[unstable(feature = "try_from", issue = "33417")] +impl TryFrom for char { + type Err = CharTryFromError; + + #[inline] + fn try_from(i: u32) -> Result { + if (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF) { + Err(CharTryFromError(())) + } else { + Ok(unsafe { from_u32_unchecked(i) }) + } + } +} + +/// The error type returned when a conversion from u32 to char fails. +#[unstable(feature = "try_from", issue = "33417")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct CharTryFromError(()); + /// Converts a digit in the given radix to a `char`. /// /// A 'radix' here is sometimes also called a 'base'. A radix of two diff --git a/src/libcore/ops.rs b/src/libcore/ops.rs index 282f281047e47..c9124249bf503 100644 --- a/src/libcore/ops.rs +++ b/src/libcore/ops.rs @@ -421,25 +421,68 @@ mul_impl! 
{ usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 } /// /// # Examples /// -/// A trivial implementation of `Div`. When `Foo / Foo` happens, it ends up -/// calling `div`, and therefore, `main` prints `Dividing!`. +/// Implementing a `Div`idable rational number struct: /// /// ``` /// use std::ops::Div; /// -/// struct Foo; +/// // The uniqueness of rational numbers in lowest terms is a consequence of +/// // the fundamental theorem of arithmetic. +/// #[derive(Eq)] +/// #[derive(PartialEq, Debug)] +/// struct Rational { +/// nominator: usize, +/// denominator: usize, +/// } /// -/// impl Div for Foo { -/// type Output = Foo; +/// impl Rational { +/// fn new(nominator: usize, denominator: usize) -> Self { +/// if denominator == 0 { +/// panic!("Zero is an invalid denominator!"); +/// } /// -/// fn div(self, _rhs: Foo) -> Foo { -/// println!("Dividing!"); -/// self +/// // Reduce to lowest terms by dividing by the greatest common +/// // divisor. +/// let gcd = gcd(nominator, denominator); +/// Rational { +/// nominator: nominator / gcd, +/// denominator: denominator / gcd, +/// } +/// } +/// } +/// +/// impl Div for Rational { +/// // The division of rational numbers is a closed operation. +/// type Output = Self; +/// +/// fn div(self, rhs: Self) -> Self { +/// if rhs.nominator == 0 { +/// panic!("Cannot divide by zero-valued `Rational`!"); +/// } +/// +/// let nominator = self.nominator * rhs.denominator; +/// let denominator = self.denominator * rhs.nominator; +/// Rational::new(nominator, denominator) +/// } +/// } +/// +/// // Euclid's two-thousand-year-old algorithm for finding the greatest common +/// // divisor. 
+/// fn gcd(x: usize, y: usize) -> usize { +/// let mut x = x; +/// let mut y = y; +/// while y != 0 { +/// let t = y; +/// y = x % y; +/// x = t; /// } +/// x /// } /// /// fn main() { -/// Foo / Foo; +/// assert_eq!(Rational::new(1, 2), Rational::new(2, 4)); +/// assert_eq!(Rational::new(1, 2) / Rational::new(3, 4), +/// Rational::new(2, 3)); /// } /// ``` /// diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index fdcadd43a0fb6..c085c8103fa27 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -388,8 +388,9 @@ pub fn next_code_point<'a, I: Iterator>(bytes: &mut I) -> Option< /// Reads the last code point out of a byte iterator (assuming a /// UTF-8-like encoding). #[inline] -fn next_code_point_reverse<'a, - I: DoubleEndedIterator>(bytes: &mut I) -> Option { +fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option + where I: DoubleEndedIterator, +{ // Decode UTF-8 let w = match bytes.next_back() { None => return None, diff --git a/src/libcoretest/char.rs b/src/libcoretest/char.rs index 4632419336d7f..2e67a4b6ad3ba 100644 --- a/src/libcoretest/char.rs +++ b/src/libcoretest/char.rs @@ -9,6 +9,24 @@ // except according to those terms. 
use std::char; +use std::convert::TryFrom; + +#[test] +fn test_convert() { + assert_eq!(u32::from('a'), 0x61); + assert_eq!(char::from(b'\0'), '\0'); + assert_eq!(char::from(b'a'), 'a'); + assert_eq!(char::from(b'\xFF'), '\u{FF}'); + assert_eq!(char::try_from(0_u32), Ok('\0')); + assert_eq!(char::try_from(0x61_u32), Ok('a')); + assert_eq!(char::try_from(0xD7FF_u32), Ok('\u{D7FF}')); + assert!(char::try_from(0xD800_u32).is_err()); + assert!(char::try_from(0xDFFF_u32).is_err()); + assert_eq!(char::try_from(0xE000_u32), Ok('\u{E000}')); + assert_eq!(char::try_from(0x10FFFF_u32), Ok('\u{10FFFF}')); + assert!(char::try_from(0x110000_u32).is_err()); + assert!(char::try_from(0xFFFF_FFFF_u32).is_err()); +} #[test] fn test_is_lowercase() { diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index 07e54dc9e8796..7f9e0a4c42a0e 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -1527,6 +1527,37 @@ fn main() { ``` "##, +E0478: r##" +A lifetime bound was not satisfied. + +Erroneous code example: + +// Check that the explicit lifetime bound (`'b`, in this example) must +// outlive all the superbound from the trait (`'a`, in this example). + +```compile_fail,E0478 +trait Wedding<'t>: 't { } + +struct Prince<'kiss, 'SnowWhite> { + child: Box + 'SnowWhite>, + // error: lifetime bound not satisfied +} +``` + +In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss` +lifetime but the declaration of the `Prince` struct doesn't enforce it. To fix +this issue, you need to specify it: + +``` +trait Wedding<'t>: 't { } + +struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'b must live + // longer than 'a. + child: Box + 'SnowWhite>, // And now it's all good! +} +``` +"##, + E0496: r##" A lifetime name is shadowing another lifetime name. Erroneous code example: @@ -1715,7 +1746,6 @@ register_diagnostics! 
{ E0475, // index of slice outside its lifetime E0476, // lifetime of the source pointer does not outlive lifetime bound... E0477, // the type `..` does not fulfill the required lifetime... - E0478, // lifetime bound not satisfied E0479, // the type `..` (provided as the value of a type parameter) is... E0480, // lifetime of method receiver does not outlive the method call E0481, // lifetime of function argument does not outlive the function call diff --git a/src/librustc/hir/svh.rs b/src/librustc/hir/svh.rs index d4e797c9f2d25..ae1f9d3028c2c 100644 --- a/src/librustc/hir/svh.rs +++ b/src/librustc/hir/svh.rs @@ -17,6 +17,7 @@ use std::fmt; use std::hash::{Hash, Hasher}; +use serialize::{Encodable, Decodable, Encoder, Decoder}; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Svh { @@ -51,3 +52,17 @@ impl fmt::Display for Svh { f.pad(&self.to_string()) } } + +impl Encodable for Svh { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + s.emit_u64(self.as_u64().to_le()) + } +} + +impl Decodable for Svh { + fn decode(d: &mut D) -> Result { + d.read_u64() + .map(u64::from_le) + .map(Svh::new) + } +} diff --git a/src/librustc/infer/freshen.rs b/src/librustc/infer/freshen.rs index ecd9759c721b2..13a4a6a0906e7 100644 --- a/src/librustc/infer/freshen.rs +++ b/src/librustc/infer/freshen.rs @@ -32,7 +32,8 @@ use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeFolder; -use std::collections::hash_map::{self, Entry}; +use util::nodemap::FnvHashMap; +use std::collections::hash_map::Entry; use super::InferCtxt; use super::unify_key::ToType; @@ -40,7 +41,7 @@ use super::unify_key::ToType; pub struct TypeFreshener<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, freshen_count: u32, - freshen_map: hash_map::HashMap>, + freshen_map: FnvHashMap>, } impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { @@ -49,7 +50,7 @@ impl<'a, 'gcx, 'tcx> TypeFreshener<'a, 'gcx, 'tcx> { TypeFreshener { infcx: infcx, freshen_count: 0, - freshen_map: 
hash_map::HashMap::new(), + freshen_map: FnvHashMap(), } } diff --git a/src/librustc/middle/dead.rs b/src/librustc/middle/dead.rs index 2a8594c59a837..73b3137923bfc 100644 --- a/src/librustc/middle/dead.rs +++ b/src/librustc/middle/dead.rs @@ -22,8 +22,8 @@ use ty::{self, TyCtxt}; use hir::def::Def; use hir::def_id::{DefId}; use lint; +use util::nodemap::FnvHashSet; -use std::collections::HashSet; use syntax::{ast, codemap}; use syntax::attr; use syntax_pos; @@ -48,7 +48,7 @@ fn should_explore<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, struct MarkSymbolVisitor<'a, 'tcx: 'a> { worklist: Vec, tcx: TyCtxt<'a, 'tcx, 'tcx>, - live_symbols: Box>, + live_symbols: Box>, struct_has_extern_repr: bool, ignore_non_const_paths: bool, inherited_pub_visibility: bool, @@ -61,7 +61,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { MarkSymbolVisitor { worklist: worklist, tcx: tcx, - live_symbols: box HashSet::new(), + live_symbols: box FnvHashSet(), struct_has_extern_repr: false, ignore_non_const_paths: false, inherited_pub_visibility: false, @@ -162,7 +162,7 @@ impl<'a, 'tcx> MarkSymbolVisitor<'a, 'tcx> { } fn mark_live_symbols(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FnvHashSet(); while !self.worklist.is_empty() { let id = self.worklist.pop().unwrap(); if scanned.contains(&id) { @@ -395,7 +395,7 @@ fn create_and_seed_worklist<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn find_live<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, access_levels: &privacy::AccessLevels, krate: &hir::Crate) - -> Box> { + -> Box> { let worklist = create_and_seed_worklist(tcx, access_levels, krate); let mut symbol_visitor = MarkSymbolVisitor::new(tcx, worklist); symbol_visitor.mark_live_symbols(); @@ -413,7 +413,7 @@ fn get_struct_ctor_id(item: &hir::Item) -> Option { struct DeadVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - live_symbols: Box>, + live_symbols: Box>, } impl<'a, 'tcx> DeadVisitor<'a, 'tcx> { diff --git a/src/librustc/middle/liveness.rs b/src/librustc/middle/liveness.rs 
index 74d29b273ff2b..b83826de26dd6 100644 --- a/src/librustc/middle/liveness.rs +++ b/src/librustc/middle/liveness.rs @@ -1479,7 +1479,13 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.ir.tcx.region_maps.call_site_extent(id, body.id), &self.fn_ret(id)); - if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() { + if fn_ret.is_never() { + // FIXME(durka) this rejects code like `fn foo(x: !) -> ! { x }` + if self.live_on_entry(entry_ln, self.s.clean_exit_var).is_some() { + span_err!(self.ir.tcx.sess, sp, E0270, + "computation may converge in a function marked as diverging"); + } + } else if self.live_on_entry(entry_ln, self.s.no_ret_var).is_some() { let param_env = ParameterEnvironment::for_item(self.ir.tcx, id); let t_ret_subst = fn_ret.subst(self.ir.tcx, ¶m_env.free_substs); let is_nil = self.ir.tcx.infer_ctxt(None, Some(param_env), diff --git a/src/librustc/middle/reachable.rs b/src/librustc/middle/reachable.rs index 6ea0fa20c5726..e29a7cf9d6846 100644 --- a/src/librustc/middle/reachable.rs +++ b/src/librustc/middle/reachable.rs @@ -22,9 +22,8 @@ use hir::def_id::DefId; use ty::{self, TyCtxt}; use middle::privacy; use session::config; -use util::nodemap::NodeSet; +use util::nodemap::{NodeSet, FnvHashSet}; -use std::collections::HashSet; use syntax::abi::Abi; use syntax::ast; use syntax::attr; @@ -204,7 +203,7 @@ impl<'a, 'tcx> ReachableContext<'a, 'tcx> { // Step 2: Mark all symbols that the symbols on the worklist touch. 
fn propagate(&mut self) { - let mut scanned = HashSet::new(); + let mut scanned = FnvHashSet(); loop { let search_item = match self.worklist.pop() { Some(item) => item, diff --git a/src/librustc/middle/region.rs b/src/librustc/middle/region.rs index 6f0ad087dc589..faf2f7dae08c5 100644 --- a/src/librustc/middle/region.rs +++ b/src/librustc/middle/region.rs @@ -237,7 +237,7 @@ impl CodeExtent { // (This is the special case aluded to in the // doc-comment for this method) let stmt_span = blk.stmts[r.first_statement_index as usize].span; - Some(Span { lo: stmt_span.hi, ..blk.span }) + Some(Span { lo: stmt_span.hi, hi: blk.span.hi, expn_id: stmt_span.expn_id }) } } } diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index e988ddcd97b15..dbbdb2590e7e6 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -914,6 +914,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "force drop flag checks on or off"), trace_macros: bool = (false, parse_bool, [UNTRACKED], "for every macro invocation, print its name and arguments"), + debug_macros: bool = (false, parse_bool, [TRACKED], + "emit line numbers debug info inside macros"), enable_nonzeroing_move_hints: bool = (false, parse_bool, [TRACKED], "force nonzeroing move optimization on"), keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED], diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index c71253aee568f..338c656379959 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -33,10 +33,11 @@ use syntax::feature_gate::AttributeType; use syntax_pos::{Span, MultiSpan}; use rustc_back::target::Target; +use rustc_data_structures::flock; use llvm; use std::path::{Path, PathBuf}; -use std::cell::{Cell, RefCell}; +use std::cell::{self, Cell, RefCell}; use std::collections::{HashMap, HashSet}; use std::env; use std::ffi::CString; @@ -101,6 +102,8 @@ pub struct Session { /// macro name and defintion span in the 
source crate. pub imported_macro_spans: RefCell>, + incr_comp_session: RefCell, + next_node_id: Cell, } @@ -331,6 +334,76 @@ impl Session { &self.opts.search_paths, kind) } + + pub fn init_incr_comp_session(&self, + session_dir: PathBuf, + lock_file: flock::Lock) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::NotInitialized = *incr_comp_session { } else { + bug!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session) + } + + *incr_comp_session = IncrCompSession::Active { + session_directory: session_dir, + lock_file: lock_file, + }; + } + + pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + if let IncrCompSession::Active { .. } = *incr_comp_session { } else { + bug!("Trying to finalize IncrCompSession `{:?}`", *incr_comp_session) + } + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::Finalized { + session_directory: new_directory_path, + }; + } + + pub fn mark_incr_comp_session_as_invalid(&self) { + let mut incr_comp_session = self.incr_comp_session.borrow_mut(); + + let session_directory = match *incr_comp_session { + IncrCompSession::Active { ref session_directory, .. 
} => { + session_directory.clone() + } + _ => bug!("Trying to invalidate IncrCompSession `{:?}`", + *incr_comp_session), + }; + + // Note: This will also drop the lock file, thus unlocking the directory + *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { + session_directory: session_directory + }; + } + + pub fn incr_comp_session_dir(&self) -> cell::Ref { + let incr_comp_session = self.incr_comp_session.borrow(); + cell::Ref::map(incr_comp_session, |incr_comp_session| { + match *incr_comp_session { + IncrCompSession::NotInitialized => { + bug!("Trying to get session directory from IncrCompSession `{:?}`", + *incr_comp_session) + } + IncrCompSession::Active { ref session_directory, .. } | + IncrCompSession::Finalized { ref session_directory } | + IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => { + session_directory + } + } + }) + } + + pub fn incr_comp_session_dir_opt(&self) -> Option> { + if self.opts.incremental.is_some() { + Some(self.incr_comp_session_dir()) + } else { + None + } + } } pub fn build_session(sopts: config::Options, @@ -446,6 +519,7 @@ pub fn build_session_(sopts: config::Options, injected_panic_runtime: Cell::new(None), available_macros: RefCell::new(HashSet::new()), imported_macro_spans: RefCell::new(HashMap::new()), + incr_comp_session: RefCell::new(IncrCompSession::NotInitialized), }; init_llvm(&sess); @@ -453,6 +527,31 @@ pub fn build_session_(sopts: config::Options, sess } +/// Holds data on the current incremental compilation session, if there is one. +#[derive(Debug)] +pub enum IncrCompSession { + // This is the state the session will be in until the incr. comp. dir is + // needed. + NotInitialized, + // This is the state during which the session directory is private and can + // be modified. + Active { + session_directory: PathBuf, + lock_file: flock::Lock, + }, + // This is the state after the session directory has been finalized. 
In this + // state, the contents of the directory must not be modified any more. + Finalized { + session_directory: PathBuf, + }, + // This is an error state that is reached when some compilation error has + // occurred. It indicates that the contents of the session directory must + // not be used, since they might be invalid. + InvalidBecauseOfErrors { + session_directory: PathBuf, + } +} + fn init_llvm(sess: &Session) { unsafe { // Before we touch LLVM, make sure that multithreading is enabled. diff --git a/src/librustc/util/fs.rs b/src/librustc/util/fs.rs index f4e1c06090e59..d7800ccaa5dd3 100644 --- a/src/librustc/util/fs.rs +++ b/src/librustc/util/fs.rs @@ -56,14 +56,49 @@ pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf { } } +pub enum LinkOrCopy { + Link, + Copy +} + /// Copy `p` into `q`, preferring to use hard-linking if possible. If /// `q` already exists, it is removed first. -pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result<()> { +/// The result indicates which of the two operations has been performed. +pub fn link_or_copy, Q: AsRef>(p: P, q: Q) -> io::Result { let p = p.as_ref(); let q = q.as_ref(); if q.exists() { try!(fs::remove_file(&q)); } - fs::hard_link(p, q) - .or_else(|_| fs::copy(p, q).map(|_| ())) + + match fs::hard_link(p, q) { + Ok(()) => Ok(LinkOrCopy::Link), + Err(_) => { + match fs::copy(p, q) { + Ok(_) => Ok(LinkOrCopy::Copy), + Err(e) => Err(e) + } + } + } +} + +// Like std::fs::create_dir_all, except handles concurrent calls among multiple +// threads or processes. 
+pub fn create_dir_racy(path: &Path) -> io::Result<()> { + match fs::create_dir(path) { + Ok(()) => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} + Err(e) => return Err(e), + } + match path.parent() { + Some(p) => try!(create_dir_racy(p)), + None => return Err(io::Error::new(io::ErrorKind::Other, + "failed to create whole tree")), + } + match fs::create_dir(path) { + Ok(()) => Ok(()), + Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), + Err(e) => Err(e), + } } diff --git a/src/librustdoc/flock.rs b/src/librustc_data_structures/flock.rs similarity index 60% rename from src/librustdoc/flock.rs rename to src/librustc_data_structures/flock.rs index 41bcfdb7cb0f0..4a184d3174dff 100644 --- a/src/librustdoc/flock.rs +++ b/src/librustc_data_structures/flock.rs @@ -15,6 +15,7 @@ //! librustdoc, it is not production quality at all. #![allow(non_camel_case_types)] +use std::path::Path; pub use self::imp::Lock; @@ -41,6 +42,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 0; pub const F_WRLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_SETLK: libc::c_int = 6; @@ -60,6 +62,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 12; @@ -84,6 +87,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 8; @@ -105,6 +109,7 @@ mod imp { pub l_sysid: libc::c_int, } + pub const F_RDLCK: libc::c_short = 1; pub const F_UNLCK: libc::c_short = 2; pub const F_WRLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 8; @@ -124,43 +129,66 @@ mod imp { pub l_pid: libc::pid_t, } + pub const F_RDLCK: libc::c_short = 1; pub const F_WRLCK: libc::c_short = 
2; pub const F_UNLCK: libc::c_short = 3; pub const F_SETLK: libc::c_int = 6; pub const F_SETLKW: libc::c_int = 7; } + #[derive(Debug)] pub struct Lock { fd: libc::c_int, } impl Lock { - pub fn new(p: &Path) -> Lock { + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { let os: &OsStr = p.as_ref(); let buf = CString::new(os.as_bytes()).unwrap(); + let open_flags = if create { + libc::O_RDWR | libc::O_CREAT + } else { + libc::O_RDWR + }; + let fd = unsafe { - libc::open(buf.as_ptr(), libc::O_RDWR | libc::O_CREAT, + libc::open(buf.as_ptr(), open_flags, libc::S_IRWXU as libc::c_int) }; - assert!(fd > 0, "failed to open lockfile: {}", - io::Error::last_os_error()); + + if fd < 0 { + return Err(io::Error::last_os_error()); + } + + let lock_type = if exclusive { + os::F_WRLCK + } else { + os::F_RDLCK + }; + let flock = os::flock { l_start: 0, l_len: 0, l_pid: 0, l_whence: libc::SEEK_SET as libc::c_short, - l_type: os::F_WRLCK, + l_type: lock_type, l_sysid: 0, }; + let cmd = if wait { os::F_SETLKW } else { os::F_SETLK }; let ret = unsafe { - libc::fcntl(fd, os::F_SETLKW, &flock) + libc::fcntl(fd, cmd, &flock) }; if ret == -1 { let err = io::Error::last_os_error(); unsafe { libc::close(fd); } - panic!("could not lock `{}`: {}", p.display(), err); + Err(err) + } else { + Ok(Lock { fd: fd }) } - Lock { fd: fd } } } @@ -191,18 +219,27 @@ mod imp { use std::os::windows::raw::HANDLE; use std::path::Path; use std::fs::{File, OpenOptions}; + use std::os::raw::{c_ulong, c_ulonglong, c_int}; + + type DWORD = c_ulong; + type BOOL = c_int; + type ULONG_PTR = c_ulonglong; - type DWORD = u32; type LPOVERLAPPED = *mut OVERLAPPED; - type BOOL = i32; const LOCKFILE_EXCLUSIVE_LOCK: DWORD = 0x00000002; + const LOCKFILE_FAIL_IMMEDIATELY: DWORD = 0x00000001; + + const FILE_SHARE_DELETE: DWORD = 0x4; + const FILE_SHARE_READ: DWORD = 0x1; + const FILE_SHARE_WRITE: DWORD = 0x2; #[repr(C)] struct OVERLAPPED { - Internal: usize, - InternalHigh: usize, - Pointer: 
*mut u8, - hEvent: *mut u8, + Internal: ULONG_PTR, + InternalHigh: ULONG_PTR, + Offset: DWORD, + OffsetHigh: DWORD, + hEvent: HANDLE, } extern "system" { @@ -214,24 +251,88 @@ mod imp { lpOverlapped: LPOVERLAPPED) -> BOOL; } + #[derive(Debug)] pub struct Lock { _file: File, } impl Lock { - pub fn new(p: &Path) -> Lock { - let f = OpenOptions::new().read(true).write(true).create(true) - .open(p).unwrap(); + pub fn new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> io::Result { + assert!(p.parent().unwrap().exists(), + "Parent directory of lock-file must exist: {}", + p.display()); + + let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE; + + let mut open_options = OpenOptions::new(); + open_options.read(true) + .share_mode(share_mode); + + if create { + open_options.create(true) + .write(true); + } + + debug!("Attempting to open lock file `{}`", p.display()); + let file = match open_options.open(p) { + Ok(file) => { + debug!("Lock file opened successfully"); + file + } + Err(err) => { + debug!("Error opening lock file: {}", err); + return Err(err) + } + }; + let ret = unsafe { let mut overlapped: OVERLAPPED = mem::zeroed(); - LockFileEx(f.as_raw_handle(), LOCKFILE_EXCLUSIVE_LOCK, 0, 100, 0, + + let mut dwFlags = 0; + if !wait { + dwFlags |= LOCKFILE_FAIL_IMMEDIATELY; + } + + if exclusive { + dwFlags |= LOCKFILE_EXCLUSIVE_LOCK; + } + + debug!("Attempting to acquire lock on lock file `{}`", + p.display()); + LockFileEx(file.as_raw_handle(), + dwFlags, + 0, + 0xFFFF_FFFF, + 0xFFFF_FFFF, &mut overlapped) }; if ret == 0 { let err = io::Error::last_os_error(); - panic!("could not lock `{}`: {}", p.display(), err); + debug!("Failed acquiring file lock: {}", err); + Err(err) + } else { + debug!("Successfully acquired lock."); + Ok(Lock { _file: file }) } - Lock { _file: f } } } + + // Note that we don't need a Drop impl on the Windows: The file is unlocked + // automatically when it's closed. 
+} + +impl imp::Lock { + pub fn panicking_new(p: &Path, + wait: bool, + create: bool, + exclusive: bool) + -> Lock { + Lock::new(p, wait, create, exclusive).unwrap_or_else(|err| { + panic!("could not lock `{}`: {}", p.display(), err); + }) + } } diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 34c3961d5b4c1..e7da18cef10f9 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -31,12 +31,15 @@ #![feature(unboxed_closures)] #![feature(fn_traits)] +#![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] extern crate core; #[macro_use] extern crate log; extern crate serialize as rustc_serialize; // used by deriving +#[cfg(unix)] +extern crate libc; pub mod bitvec; pub mod graph; @@ -51,6 +54,7 @@ pub mod fnv; pub mod tuple_slice; pub mod veccell; pub mod control_flow_graph; +pub mod flock; // See comments in src/librustc/lib.rs #[doc(hidden)] diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs index c6100004786be..5e4bab943349e 100644 --- a/src/librustc_driver/driver.rs +++ b/src/librustc_driver/driver.rs @@ -88,7 +88,7 @@ pub fn compile_input(sess: &Session, // We need nested scopes here, because the intermediate results can keep // large chunks of memory alive and we want to free them as soon as // possible to keep the peak memory usage low - let (outputs, trans, crate_name) = { + let (outputs, trans) = { let krate = match phase_1_parse_input(sess, cfg, input) { Ok(krate) => krate, Err(mut parse_error) => { @@ -212,11 +212,11 @@ pub fn compile_input(sess: &Session, // Discard interned strings as they are no longer required. token::clear_ident_interner(); - Ok((outputs, trans, crate_name.clone())) + Ok((outputs, trans)) })?? 
}; - let phase5_result = phase_5_run_llvm_passes(sess, &crate_name, &trans, &outputs); + let phase5_result = phase_5_run_llvm_passes(sess, &trans, &outputs); controller_entry_point!(after_llvm, sess, @@ -228,6 +228,10 @@ pub fn compile_input(sess: &Session, phase_6_link_output(sess, &trans, &outputs); + // Now that we won't touch anything in the incremental compilation directory + // any more, we can finalize it (which involves renaming it) + rustc_incremental::finalize_session_directory(sess, trans.link.crate_hash); + controller_entry_point!(compilation_done, sess, CompileState::state_when_compilation_done(input, sess, outdir, output), @@ -1011,19 +1015,18 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, time(time_passes, "assert dep graph", - move || rustc_incremental::assert_dep_graph(tcx)); + || rustc_incremental::assert_dep_graph(tcx)); time(time_passes, "serialize dep graph", - move || rustc_incremental::save_dep_graph(tcx)); - + || rustc_incremental::save_dep_graph(tcx, + translation.link.crate_hash)); translation } /// Run LLVM itself, producing a bitcode file, assembly file or object file /// as a side effect. 
pub fn phase_5_run_llvm_passes(sess: &Session, - crate_name: &str, trans: &trans::CrateTranslation, outputs: &OutputFilenames) -> CompileResult { if sess.opts.cg.no_integrated_as { @@ -1046,7 +1049,7 @@ pub fn phase_5_run_llvm_passes(sess: &Session, time(sess.time_passes(), "serialize work products", - move || rustc_incremental::save_work_products(sess, crate_name)); + move || rustc_incremental::save_work_products(sess)); if sess.err_count() > 0 { Err(sess.err_count()) diff --git a/src/librustc_incremental/lib.rs b/src/librustc_incremental/lib.rs index 0d11b0794feac..57013debb1992 100644 --- a/src/librustc_incremental/lib.rs +++ b/src/librustc_incremental/lib.rs @@ -22,6 +22,7 @@ #![feature(question_mark)] #![feature(rustc_private)] #![feature(staged_api)] +#![feature(rand)] extern crate graphviz; extern crate rbml; @@ -44,3 +45,4 @@ pub use persist::save_dep_graph; pub use persist::save_trans_partition; pub use persist::save_work_products; pub use persist::in_incr_comp_dir; +pub use persist::finalize_session_directory; diff --git a/src/librustc_incremental/persist/fs.rs b/src/librustc_incremental/persist/fs.rs new file mode 100644 index 0000000000000..2f94fb774d751 --- /dev/null +++ b/src/librustc_incremental/persist/fs.rs @@ -0,0 +1,988 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + + +//! This module manages how the incremental compilation cache is represented in +//! the file system. +//! +//! Incremental compilation caches are managed according to a copy-on-write +//! strategy: Once a complete, consistent cache version is finalized, it is +//! never modified. Instead, when a subsequent compilation session is started, +//! 
the compiler will allocate a new version of the cache that starts out as +//! a copy of the previous version. Then only this new copy is modified and it +//! will not be visible to other processes until it is finalized. This ensures +//! that multiple compiler processes can be executed concurrently for the same +//! crate without interfering with each other or blocking each other. +//! +//! More concretely this is implemented via the following protocol: +//! +//! 1. For a newly started compilation session, the compiler allocates a +//! new `session` directory within the incremental compilation directory. +//! This session directory will have a unique name that ends with the suffix +//! "-working" and that contains a creation timestamp. +//! 2. Next, the compiler looks for the newest finalized session directory, +//! that is, a session directory from a previous compilation session that +//! has been marked as valid and consistent. A session directory is +//! considered finalized if the "-working" suffix in the directory name has +//! been replaced by the SVH of the crate. +//! 3. Once the compiler has found a valid, finalized session directory, it will +//! hard-link/copy its contents into the new "-working" directory. If all +//! goes well, it will have its own, private copy of the source directory and +//! subsequently not have to worry about synchronizing with other compiler +//! processes. +//! 4. Now the compiler can do its normal compilation process, which involves +//! reading and updating its private session directory. +//! 5. When compilation finishes without errors, the private session directory +//! will be in a state where it can be used as input for other compilation +//! sessions. That is, it will contain a dependency graph and cache artifacts +//! that are consistent with the state of the source code it was compiled +//! from, with no need to change them ever again. At this point, the compiler +//! 
finalizes and "publishes" its private session directory by renaming it +//! from "sess-{timestamp}-{random}-working" to "sess-{timestamp}-{SVH}". +//! 6. At this point the "old" session directory that we copied our data from +//! at the beginning of the session has become obsolete because we have just +//! published a more current version. Thus the compiler will delete it. +//! +//! ## Garbage Collection +//! +//! Naively following the above protocol might lead to old session directories +//! piling up if a compiler instance crashes for some reason before it's able to +//! remove its private session directory. In order to avoid wasting disk space, +//! the compiler also does some garbage collection each time it is started in +//! incremental compilation mode. Specifically, it will scan the incremental +//! compilation directory for private session directories that are not in use +//! any more and will delete those. It will also delete any finalized session +//! directories for a given crate except for the most recent one. +//! +//! ## Synchronization +//! +//! There is some synchronization needed in order for the compiler to be able to +//! determine whether a given private session directory is not in use any more. +//! This is done by creating a lock file within each session directory and +//! locking it while the directory is still being used. Since file locks have +//! operating system support, we can rely on the lock being released if the +//! compiler process dies for some unexpected reason. Thus, when garbage +//! collecting private session directories, the collecting process can determine +//! whether the directory is still in use by trying to acquire a lock on the +//! file. If locking the file fails, the original process must still be alive. +//! If locking the file succeeds, we know that the owning process is not alive +//! any more and we can safely delete the directory. +//! 
There is still a small time window between the original process creating the +//! lock file and actually locking it. In order to minimize the chance that +//! another process tries to acquire the lock in just that instance, only +//! session directories that are older than a few seconds are considered for +//! garbage collection. +//! +//! Another case that has to be considered is what happens if one process +//! deletes a finalized session directory that another process is currently +//! trying to copy from. This case is also handled via the lock file. Before +//! a process starts copying a finalized session directory, it will acquire a +//! shared lock on the directory's lock file. Any garbage collecting process, +//! on the other hand, will acquire an exclusive lock on the lock file. +//! Thus, if a directory is being collected, any reader process will fail +//! acquiring the shared lock and will leave the directory alone. Conversely, +//! if a collecting process can't acquire the exclusive lock because the +//! directory is currently being read from, it will leave collecting that +//! directory to another process at a later point in time. +//! The exact same scheme is also used when reading the metadata hashes file +//! from an extern crate. When a crate is compiled, the hash values of its +//! metadata are stored in a file in its session directory. When the +//! compilation session of another crate imports the first crate's metadata, +//! it also has to read in the accompanying metadata hashes. It thus will access +//! the finalized session directory of all crates it links to and while doing +//! so, it will also place a read lock on the respective session directory +//! so that it won't be deleted while the metadata hashes are loaded. +//! +//! ## Preconditions +//! +//! This system relies on two features being available in the file system in +//! order to work really well: file locking and hard linking. +//! 
If hard linking is not available (like on FAT) the data in the cache +//! actually has to be copied at the beginning of each session. +//! If file locking does not work reliably (like on NFS), some of the +//! synchronization will go haywire. +//! In both cases we recommend to locate the incremental compilation directory +//! on a file system that supports these things. +//! It might be a good idea though to try and detect whether we are on an +//! unsupported file system and emit a warning in that case. This is not yet +//! implemented. + +use rustc::hir::svh::Svh; +use rustc::middle::cstore::LOCAL_CRATE; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::util::fs as fs_util; +use rustc_data_structures::flock; +use rustc_data_structures::fnv::{FnvHashSet, FnvHashMap}; + +use std::ffi::OsString; +use std::fs as std_fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::time::{UNIX_EPOCH, SystemTime, Duration}; +use std::__rand::{thread_rng, Rng}; +use syntax::ast; + +const LOCK_FILE_EXT: &'static str = ".lock"; +const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin"; +const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin"; +const METADATA_HASHES_FILENAME: &'static str = "metadata.bin"; + +pub fn dep_graph_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME) +} + +pub fn work_products_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME) +} + +pub fn metadata_hash_export_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, METADATA_HASHES_FILENAME) +} + +pub fn metadata_hash_import_path(import_session_dir: &Path) -> PathBuf { + import_session_dir.join(METADATA_HASHES_FILENAME) +} + +pub fn lock_file_path(session_dir: &Path) -> PathBuf { + let crate_dir = session_dir.parent().unwrap(); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + let dash_indices: Vec<_> = 
directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with \ + malformed name: {}", + session_dir.display()) + } + + crate_dir.join(&directory_name[0 .. dash_indices[2]]) + .with_extension(&LOCK_FILE_EXT[1..]) +} + +pub fn in_incr_comp_dir_sess(sess: &Session, file_name: &str) -> PathBuf { + in_incr_comp_dir(&sess.incr_comp_session_dir(), file_name) +} + +pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBuf { + incr_comp_session_dir.join(file_name) +} + +/// Allocates the private session directory. The boolean in the Ok() result +/// indicates whether we should try loading a dep graph from the successfully +/// initialized directory, or not. +/// The post-condition of this fn is that we have a valid incremental +/// compilation session directory, if the result is `Ok`. A valid session +/// directory is one that contains a locked lock file. It may or may not contain +/// a dep-graph and work products from a previous session. +/// If the call fails, the fn may leave behind an invalid session directory. +/// The garbage collection will take care of it. +pub fn prepare_session_directory(tcx: TyCtxt) -> Result { + debug!("prepare_session_directory"); + + // {incr-comp-dir}/{crate-name-and-disambiguator} + let crate_dir = crate_path_tcx(tcx, LOCAL_CRATE); + debug!("crate-dir: {}", crate_dir.display()); + try!(create_dir(tcx.sess, &crate_dir, "crate")); + + let mut source_directories_already_tried = FnvHashSet(); + + loop { + // Generate a session directory of the form: + // + // {incr-comp-dir}/{crate-name-and-disambiguator}/sess-{timestamp}-{random}-working + let session_dir = generate_session_dir_path(&crate_dir); + debug!("session-dir: {}", session_dir.display()); + + // Lock the new session directory. 
If this fails, return an + // error without retrying + let (directory_lock, lock_file_path) = try!(lock_directory(tcx.sess, &session_dir)); + + // Now that we have the lock, we can actually create the session + // directory + try!(create_dir(tcx.sess, &session_dir, "session")); + + // Find a suitable source directory to copy from. Ignore those that we + // have already tried before. + let source_directory = find_source_directory(&crate_dir, + &source_directories_already_tried); + + let source_directory = if let Some(dir) = source_directory { + dir + } else { + // There's nowhere to copy from, we're done + debug!("no source directory found. Continuing with empty session \ + directory."); + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(false) + }; + + debug!("attempting to copy data from source: {}", + source_directory.display()); + + let print_file_copy_stats = tcx.sess.opts.debugging_opts.incremental_info; + + // Try copying over all files from the source directory + if copy_files(&session_dir, &source_directory, print_file_copy_stats).is_ok() { + debug!("successfully copied data from: {}", + source_directory.display()); + + tcx.sess.init_incr_comp_session(session_dir, directory_lock); + return Ok(true) + } else { + debug!("copying failed - trying next directory"); + + // Something went wrong while trying to copy/link files from the + // source directory. Try again with a different one. + source_directories_already_tried.insert(source_directory); + + // Try to remove the session directory we just allocated. We don't + // know if there's any garbage in it from the failed copy action. 
+ if let Err(err) = std_fs::remove_dir_all(&session_dir) { + tcx.sess.warn(&format!("Failed to delete partly initialized \ + session dir `{}`: {}", + session_dir.display(), + err)); + } + + delete_session_dir_lock_file(tcx.sess, &lock_file_path); + mem::drop(directory_lock); + } + } +} + + +/// This function finalizes and thus 'publishes' the session directory by +/// renaming it to `sess-{timestamp}-{svh}` and releasing the file lock. +/// If there have been compilation errors, however, this function will just +/// delete the presumably invalid session directory. +pub fn finalize_session_directory(sess: &Session, svh: Svh) { + if sess.opts.incremental.is_none() { + return; + } + + let incr_comp_session_dir: PathBuf = sess.incr_comp_session_dir().clone(); + + if sess.has_errors() { + // If there have been any errors during compilation, we don't want to + // publish this session directory. Rather, we'll just delete it. + + debug!("finalize_session_directory() - invalidating session directory: {}", + incr_comp_session_dir.display()); + + if let Err(err) = std_fs::remove_dir_all(&*incr_comp_session_dir) { + sess.warn(&format!("Error deleting incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + err)); + } + + let lock_file_path = lock_file_path(&*incr_comp_session_dir); + delete_session_dir_lock_file(sess, &lock_file_path); + sess.mark_incr_comp_session_as_invalid(); + } + + debug!("finalize_session_directory() - session directory: {}", + incr_comp_session_dir.display()); + + let old_sub_dir_name = incr_comp_session_dir.file_name() + .unwrap() + .to_string_lossy(); + assert_no_characters_lost(&old_sub_dir_name); + + // Keep the 'sess-{timestamp}-{random-number}' prefix, but replace the + // '-working' part with the SVH of the crate + let dash_indices: Vec<_> = old_sub_dir_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + bug!("Encountered incremental compilation session directory with 
\ + malformed name: {}", + incr_comp_session_dir.display()) + } + + // State: "sess-{timestamp}-{random-number}-" + let mut new_sub_dir_name = String::from(&old_sub_dir_name[.. dash_indices[2] + 1]); + + // Append the svh + new_sub_dir_name.push_str(&svh.to_string()); + + // Create the full path + let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name); + debug!("finalize_session_directory() - new path: {}", new_path.display()); + + match std_fs::rename(&*incr_comp_session_dir, &new_path) { + Ok(_) => { + debug!("finalize_session_directory() - directory renamed successfully"); + + // This unlocks the directory + sess.finalize_incr_comp_session(new_path); + } + Err(e) => { + // Warn about the error. However, no need to abort compilation now. + sess.warn(&format!("Error finalizing incremental compilation \ + session directory `{}`: {}", + incr_comp_session_dir.display(), + e)); + + debug!("finalize_session_directory() - error, marking as invalid"); + // Drop the file lock, so we can garbage collect + sess.mark_incr_comp_session_as_invalid(); + } + } + + let _ = garbage_collect_session_directories(sess); +} + +fn copy_files(target_dir: &Path, + source_dir: &Path, + print_stats_on_success: bool) + -> Result<(), ()> { + // We acquire a shared lock on the lock file of the directory, so that + // nobody deletes it out from under us while we are reading from it. 
+ let lock_file_path = lock_file_path(source_dir); + let _lock = if let Ok(lock) = flock::Lock::new(&lock_file_path, + false, // don't wait, + false, // don't create + false) { // not exclusive + lock + } else { + // Could not acquire the lock, don't try to copy from here + return Err(()) + }; + + let source_dir_iterator = match source_dir.read_dir() { + Ok(it) => it, + Err(_) => return Err(()) + }; + + let mut files_linked = 0; + let mut files_copied = 0; + + for entry in source_dir_iterator { + match entry { + Ok(entry) => { + let file_name = entry.file_name(); + + let target_file_path = target_dir.join(file_name); + let source_path = entry.path(); + + debug!("copying into session dir: {}", source_path.display()); + match fs_util::link_or_copy(source_path, target_file_path) { + Ok(fs_util::LinkOrCopy::Link) => { + files_linked += 1 + } + Ok(fs_util::LinkOrCopy::Copy) => { + files_copied += 1 + } + Err(_) => return Err(()) + } + } + Err(_) => { + return Err(()) + } + } + } + + if print_stats_on_success { + println!("incr. comp. session directory: {} files hard-linked", files_linked); + println!("incr. comp. 
session directory: {} files copied", files_copied); + } + + Ok(()) +} + +/// Generate unique directory path of the form: +/// {crate_dir}/sess-{timestamp}-{random-number}-working +fn generate_session_dir_path(crate_dir: &Path) -> PathBuf { + let timestamp = timestamp_to_string(SystemTime::now()); + debug!("generate_session_dir_path: timestamp = {}", timestamp); + let random_number = thread_rng().next_u32(); + debug!("generate_session_dir_path: random_number = {}", random_number); + + let directory_name = format!("sess-{}-{:x}-working", timestamp, random_number); + debug!("generate_session_dir_path: directory_name = {}", directory_name); + let directory_path = crate_dir.join(directory_name); + debug!("generate_session_dir_path: directory_path = {}", directory_path.display()); + directory_path +} + +fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(),()> { + match fs_util::create_dir_racy(path) { + Ok(()) => { + debug!("{} directory created successfully", dir_tag); + Ok(()) + } + Err(err) => { + sess.err(&format!("Could not create incremental compilation {} \ + directory `{}`: {}", + dir_tag, + path.display(), + err)); + Err(()) + } + } +} + +/// Allocate the lock-file and lock it. 
+fn lock_directory(sess: &Session, + session_dir: &Path) + -> Result<(flock::Lock, PathBuf), ()> { + let lock_file_path = lock_file_path(session_dir); + debug!("lock_directory() - lock_file: {}", lock_file_path.display()); + + match flock::Lock::new(&lock_file_path, + false, // don't wait + true, // create the lock file + true) { // the lock should be exclusive + Ok(lock) => Ok((lock, lock_file_path)), + Err(err) => { + sess.err(&format!("incremental compilation: could not create \ + session directory lock file: {}", err)); + Err(()) + } + } +} + +fn delete_session_dir_lock_file(sess: &Session, + lock_file_path: &Path) { + if let Err(err) = std_fs::remove_file(&lock_file_path) { + sess.warn(&format!("Error deleting lock file for incremental \ + compilation session directory `{}`: {}", + lock_file_path.display(), + err)); + } +} + +/// Find the most recent published session directory that is not in the +/// ignore-list. +fn find_source_directory(crate_dir: &Path, + source_directories_already_tried: &FnvHashSet) + -> Option { + let iter = crate_dir.read_dir() + .unwrap() // FIXME + .filter_map(|e| e.ok().map(|e| e.path())); + + find_source_directory_in_iter(iter, source_directories_already_tried) +} + +fn find_source_directory_in_iter(iter: I, + source_directories_already_tried: &FnvHashSet) + -> Option + where I: Iterator +{ + let mut best_candidate = (UNIX_EPOCH, None); + + for session_dir in iter { + debug!("find_source_directory_in_iter - inspecting `{}`", + session_dir.display()); + + let directory_name = session_dir.file_name().unwrap().to_string_lossy(); + assert_no_characters_lost(&directory_name); + + if source_directories_already_tried.contains(&session_dir) || + !is_session_directory(&directory_name) || + !is_finalized(&directory_name) { + debug!("find_source_directory_in_iter - ignoring."); + continue + } + + let timestamp = extract_timestamp_from_session_dir(&directory_name) + .unwrap_or_else(|_| { + bug!("unexpected incr-comp session dir: {}", 
session_dir.display()) + }); + + if timestamp > best_candidate.0 { + best_candidate = (timestamp, Some(session_dir.clone())); + } + } + + best_candidate.1 +} + +fn is_finalized(directory_name: &str) -> bool { + !directory_name.ends_with("-working") +} + +fn is_session_directory(directory_name: &str) -> bool { + directory_name.starts_with("sess-") && + !directory_name.ends_with(LOCK_FILE_EXT) +} + +fn is_session_directory_lock_file(file_name: &str) -> bool { + file_name.starts_with("sess-") && file_name.ends_with(LOCK_FILE_EXT) +} + +fn extract_timestamp_from_session_dir(directory_name: &str) + -> Result { + if !is_session_directory(directory_name) { + return Err(()) + } + + let dash_indices: Vec<_> = directory_name.match_indices("-") + .map(|(idx, _)| idx) + .collect(); + if dash_indices.len() != 3 { + return Err(()) + } + + string_to_timestamp(&directory_name[dash_indices[0]+1 .. dash_indices[1]]) +} + +fn timestamp_to_string(timestamp: SystemTime) -> String { + let duration = timestamp.duration_since(UNIX_EPOCH).unwrap(); + let micros = duration.as_secs() * 1_000_000 + + (duration.subsec_nanos() as u64) / 1000; + format!("{:x}", micros) +} + +fn string_to_timestamp(s: &str) -> Result { + let micros_since_unix_epoch = u64::from_str_radix(s, 16); + + if micros_since_unix_epoch.is_err() { + return Err(()) + } + + let micros_since_unix_epoch = micros_since_unix_epoch.unwrap(); + + let duration = Duration::new(micros_since_unix_epoch / 1_000_000, + 1000 * (micros_since_unix_epoch % 1_000_000) as u32); + Ok(UNIX_EPOCH + duration) +} + +fn crate_path_tcx(tcx: TyCtxt, cnum: ast::CrateNum) -> PathBuf { + crate_path(tcx.sess, &tcx.crate_name(cnum), &tcx.crate_disambiguator(cnum)) +} + +/// Finds the session directory containing the correct metadata hashes file for +/// the given crate. In order to do that it has to compute the crate directory +/// of the given crate, and in there, look for the session directory with the +/// correct SVH in it. 
+/// Note that we have to match on the exact SVH here, not just the +/// crate's (name, disambiguator) pair. The metadata hashes are only valid for +/// the exact version of the binary we are reading from now (i.e. the hashes +/// are part of the dependency graph of a specific compilation session). +pub fn find_metadata_hashes_for(tcx: TyCtxt, cnum: ast::CrateNum) -> Option { + let crate_directory = crate_path_tcx(tcx, cnum); + + if !crate_directory.exists() { + return None + } + + let dir_entries = match crate_directory.read_dir() { + Ok(dir_entries) => dir_entries, + Err(e) => { + tcx.sess + .err(&format!("incremental compilation: Could not read crate directory `{}`: {}", + crate_directory.display(), e)); + return None + } + }; + + let target_svh = tcx.sess.cstore.crate_hash(cnum).to_string(); + + let sub_dir = find_metadata_hashes_iter(&target_svh, dir_entries.filter_map(|e| { + e.ok().map(|e| e.file_name().to_string_lossy().into_owned()) + })); + + sub_dir.map(|sub_dir_name| crate_directory.join(&sub_dir_name)) +} + +fn find_metadata_hashes_iter<'a, I>(target_svh: &str, iter: I) -> Option + where I: Iterator +{ + for sub_dir_name in iter { + if !is_session_directory(&sub_dir_name) || !is_finalized(&sub_dir_name) { + // This is not a usable session directory + continue + } + + let is_match = if let Some(last_dash_pos) = sub_dir_name.rfind("-") { + let candidate_svh = &sub_dir_name[last_dash_pos + 1 .. ]; + target_svh == candidate_svh + } else { + // some kind of invalid directory name + continue + }; + + if is_match { + return Some(OsString::from(sub_dir_name)) + } + } + + None +} + +fn crate_path(sess: &Session, + crate_name: &str, + crate_disambiguator: &str) + -> PathBuf { + use std::hash::{SipHasher, Hasher, Hash}; + + let incr_dir = sess.opts.incremental.as_ref().unwrap().clone(); + + // The full crate disambiguator is really long. A hash of it should be + // sufficient. 
+ let mut hasher = SipHasher::new(); + crate_disambiguator.hash(&mut hasher); + + let crate_name = format!("{}-{:x}", crate_name, hasher.finish()); + incr_dir.join(crate_name) +} + +fn assert_no_characters_lost(s: &str) { + if s.contains('\u{FFFD}') { + bug!("Could not losslessly convert '{}'.", s) + } +} + +fn is_old_enough_to_be_collected(timestamp: SystemTime) -> bool { + timestamp < SystemTime::now() - Duration::from_secs(10) +} + +pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> { + debug!("garbage_collect_session_directories() - begin"); + + let session_directory = sess.incr_comp_session_dir(); + debug!("garbage_collect_session_directories() - session directory: {}", + session_directory.display()); + + let crate_directory = session_directory.parent().unwrap(); + debug!("garbage_collect_session_directories() - crate directory: {}", + crate_directory.display()); + + // First do a pass over the crate directory, collecting lock files and + // session directories + let mut session_directories = FnvHashSet(); + let mut lock_files = FnvHashSet(); + + for dir_entry in try!(crate_directory.read_dir()) { + let dir_entry = match dir_entry { + Ok(dir_entry) => dir_entry, + _ => { + // Ignore any errors + continue + } + }; + + let entry_name = dir_entry.file_name(); + let entry_name = entry_name.to_string_lossy(); + + if is_session_directory_lock_file(&entry_name) { + assert_no_characters_lost(&entry_name); + lock_files.insert(entry_name.into_owned()); + } else if is_session_directory(&entry_name) { + assert_no_characters_lost(&entry_name); + session_directories.insert(entry_name.into_owned()); + } else { + // This is something we don't know, leave it alone + } + } + + // Now map from lock files to session directories + let lock_file_to_session_dir: FnvHashMap> = + lock_files.into_iter() + .map(|lock_file_name| { + assert!(lock_file_name.ends_with(LOCK_FILE_EXT)); + let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len(); + let 
session_dir = { + let dir_prefix = &lock_file_name[0 .. dir_prefix_end]; + session_directories.iter() + .find(|dir_name| dir_name.starts_with(dir_prefix)) + }; + (lock_file_name, session_dir.map(String::clone)) + }) + .collect(); + + // Delete all lock files, that don't have an associated directory. They must + // be some kind of leftover + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + if directory_name.is_none() { + let timestamp = match extract_timestamp_from_session_dir(lock_file_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found lock-file with malformed timestamp: {}", + crate_directory.join(&lock_file_name).display()); + // Ignore it + continue + } + }; + + let lock_file_path = crate_directory.join(&**lock_file_name); + + if is_old_enough_to_be_collected(timestamp) { + debug!("garbage_collect_session_directories() - deleting \ + garbage lock file: {}", lock_file_path.display()); + delete_session_dir_lock_file(sess, &lock_file_path); + } else { + debug!("garbage_collect_session_directories() - lock file with \ + no session dir not old enough to be collected: {}", + lock_file_path.display()); + } + } + } + + // Filter out `None` directories + let lock_file_to_session_dir: FnvHashMap = + lock_file_to_session_dir.into_iter() + .filter_map(|(lock_file_name, directory_name)| { + directory_name.map(|n| (lock_file_name, n)) + }) + .collect(); + + let mut deletion_candidates = vec![]; + let mut definitely_delete = vec![]; + + for (lock_file_name, directory_name) in &lock_file_to_session_dir { + debug!("garbage_collect_session_directories() - inspecting: {}", + directory_name); + + let timestamp = match extract_timestamp_from_session_dir(directory_name) { + Ok(timestamp) => timestamp, + Err(()) => { + debug!("Found session-dir with malformed timestamp: {}", + crate_directory.join(directory_name).display()); + // Ignore it + continue + } + }; + + if is_finalized(directory_name) { + let lock_file_path = 
crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + debug!("garbage_collect_session_directories() - adding \ + deletion candidate: {}", directory_name); + + // Note that we are holding on to the lock + deletion_candidates.push((timestamp, + crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else if is_old_enough_to_be_collected(timestamp) { + // When cleaning out "-working" session directories, i.e. + // session directories that might still be in use by another + // compiler instance, we only look at directories that are + // at least ten seconds old. This is supposed to reduce the + // chance of deleting a directory in the time window where + // the process has allocated the directory but has not yet + // acquired the file-lock on it. + + // Try to acquire the directory lock. If we can't, it + // means that the owning process is still alive and we + // leave this directory alone. 
+ let lock_file_path = crate_directory.join(lock_file_name); + match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + true) { // get an exclusive lock + Ok(lock) => { + debug!("garbage_collect_session_directories() - \ + successfully acquired lock"); + + // Note that we are holding on to the lock + definitely_delete.push((crate_directory.join(directory_name), + Some(lock))); + } + Err(_) => { + debug!("garbage_collect_session_directories() - \ + not collecting, still in use"); + } + } + } else { + debug!("garbage_collect_session_directories() - not finalized, not \ + old enough"); + } + } + + // Delete all but the most recent of the candidates + for (path, lock) in all_except_most_recent(deletion_candidates) { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = std_fs::remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect finalized incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + for (path, lock) in definitely_delete { + debug!("garbage_collect_session_directories() - deleting `{}`", + path.display()); + + if let Err(err) = std_fs::remove_dir_all(&path) { + sess.warn(&format!("Failed to garbage collect incremental \ + compilation session directory `{}`: {}", + path.display(), + err)); + } else { + delete_session_dir_lock_file(sess, &lock_file_path(&path)); + } + + // Let's make it explicit that the file lock is released at this point, + // or rather, that we held on to it until here + mem::drop(lock); + } + + Ok(()) +} + +fn all_except_most_recent(deletion_candidates: Vec<(SystemTime, PathBuf, Option)>) + -> FnvHashMap> { + let most_recent = deletion_candidates.iter() + 
.map(|&(timestamp, _, _)| timestamp) + .max(); + + if let Some(most_recent) = most_recent { + deletion_candidates.into_iter() + .filter(|&(timestamp, _, _)| timestamp != most_recent) + .map(|(_, path, lock)| (path, lock)) + .collect() + } else { + FnvHashMap() + } +} + +#[test] +fn test_all_except_most_recent() { + assert_eq!(all_except_most_recent( + vec![ + (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None), + (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None), + (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None), + (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None), + (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None), + ]).keys().cloned().collect::>(), + vec![ + PathBuf::from("1"), + PathBuf::from("2"), + PathBuf::from("3"), + PathBuf::from("4"), + ].into_iter().collect::>() + ); + + assert_eq!(all_except_most_recent( + vec![ + ]).keys().cloned().collect::>(), + FnvHashSet() + ); +} + +#[test] +fn test_timestamp_serialization() { + for i in 0 .. 
1_000u64 { + let time = UNIX_EPOCH + Duration::new(i * 3_434_578, (i as u32) * 239_000); + let s = timestamp_to_string(time); + assert_eq!(time, string_to_timestamp(&s).unwrap()); + } +} + +#[test] +fn test_find_source_directory_in_iter() { + let already_visited = FnvHashSet(); + + // Find newest + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/sess-3234-0000-svh"), + PathBuf::from("crate-dir/sess-2234-0000-svh"), + PathBuf::from("crate-dir/sess-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/sess-3234-0000-svh"))); + + // Filter out "-working" + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/sess-3234-0000-working"), + PathBuf::from("crate-dir/sess-2234-0000-svh"), + PathBuf::from("crate-dir/sess-1234-0000-svh")].into_iter(), &already_visited), + Some(PathBuf::from("crate-dir/sess-2234-0000-svh"))); + + // Handle empty + assert_eq!(find_source_directory_in_iter(vec![].into_iter(), &already_visited), + None); + + // Handle only working + assert_eq!(find_source_directory_in_iter( + vec![PathBuf::from("crate-dir/sess-3234-0000-working"), + PathBuf::from("crate-dir/sess-2234-0000-working"), + PathBuf::from("crate-dir/sess-1234-0000-working")].into_iter(), &already_visited), + None); +} + +#[test] +fn test_find_metadata_hashes_iter() +{ + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("sess-timestamp1-testsvh1"), + String::from("sess-timestamp2-testsvh2"), + String::from("sess-timestamp3-testsvh3"), + ].into_iter()), + Some(OsString::from("sess-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("sess-timestamp1-testsvh1"), + String::from("sess-timestamp2-testsvh2"), + String::from("invalid-name"), + ].into_iter()), + Some(OsString::from("sess-timestamp2-testsvh2")) + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("sess-timestamp1-testsvh1"), + 
String::from("sess-timestamp2-testsvh2-working"), + String::from("sess-timestamp3-testsvh3"), + ].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh1", + vec![ + String::from("sess-timestamp1-random1-working"), + String::from("sess-timestamp2-random2-working"), + String::from("sess-timestamp3-random3-working"), + ].into_iter()), + None + ); + + assert_eq!(find_metadata_hashes_iter("testsvh2", + vec![ + String::from("timestamp1-testsvh2"), + String::from("timestamp2-testsvh2"), + String::from("timestamp3-testsvh2"), + ].into_iter()), + None + ); +} diff --git a/src/librustc_incremental/persist/hash.rs b/src/librustc_incremental/persist/hash.rs index 344b05f095457..6bb7ba61c6d2c 100644 --- a/src/librustc_incremental/persist/hash.rs +++ b/src/librustc_incremental/persist/hash.rs @@ -16,13 +16,14 @@ use rustc::hir::def_id::DefId; use rustc::hir::svh::Svh; use rustc::ty::TyCtxt; use rustc_data_structures::fnv::FnvHashMap; +use rustc_data_structures::flock; use rustc_serialize::Decodable; use std::io::{ErrorKind, Read}; use std::fs::File; use syntax::ast; use super::data::*; -use super::util::*; +use super::fs::*; pub struct HashContext<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -124,19 +125,43 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { debug!("load_data: svh={}", svh); assert!(old.is_none(), "loaded data for crate {:?} twice", cnum); - if let Some(path) = metadata_hash_path(self.tcx, cnum) { - debug!("load_data: path={:?}", path); + if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) { + debug!("load_data: session_dir={:?}", session_dir); + + // Lock the directory we'll be reading the hashes from. 
+ let lock_file_path = lock_file_path(&session_dir); + let _lock = match flock::Lock::new(&lock_file_path, + false, // don't wait + false, // don't create the lock-file + false) { // shared lock + Ok(lock) => lock, + Err(err) => { + debug!("Could not acquire lock on `{}` while trying to \ + load metadata hashes: {}", + lock_file_path.display(), + err); + + // Could not acquire the lock. The directory is probably in + // in the process of being deleted. It's OK to just exit + // here. It's the same scenario as if the file had not + // existed in the first place. + return + } + }; + + let hashes_file_path = metadata_hash_import_path(&session_dir); + let mut data = vec![]; match - File::open(&path) - .and_then(|mut file| file.read_to_end(&mut data)) + File::open(&hashes_file_path) + .and_then(|mut file| file.read_to_end(&mut data)) { Ok(_) => { - match self.load_from_data(cnum, &data) { + match self.load_from_data(cnum, &data, svh) { Ok(()) => { } Err(err) => { bug!("decoding error in dep-graph from `{}`: {}", - path.display(), err); + &hashes_file_path.display(), err); } } } @@ -148,7 +173,7 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { _ => { self.tcx.sess.err( &format!("could not load dep information from `{}`: {}", - path.display(), err)); + hashes_file_path.display(), err)); return; } } @@ -157,11 +182,22 @@ impl<'a, 'tcx> HashContext<'a, 'tcx> { } } - fn load_from_data(&mut self, cnum: ast::CrateNum, data: &[u8]) -> Result<(), Error> { + fn load_from_data(&mut self, + cnum: ast::CrateNum, + data: &[u8], + expected_svh: Svh) -> Result<(), Error> { debug!("load_from_data(cnum={})", cnum); // Load up the hashes for the def-ids from this crate. let mut decoder = Decoder::new(data, 0); + let svh_in_hashes_file = try!(Svh::decode(&mut decoder)); + + if svh_in_hashes_file != expected_svh { + // We should not be able to get here. If we do, then + // `fs::find_metadata_hashes_for()` has messed up. + bug!("mismatch between SVH in crate and SVH in incr. comp. 
hashes") + } + let serialized_hashes = try!(SerializedMetadataHashes::decode(&mut decoder)); for serialized_hash in serialized_hashes.hashes { // the hashes are stored with just a def-index, which is diff --git a/src/librustc_incremental/persist/load.rs b/src/librustc_incremental/persist/load.rs index c736437df1a9e..bab3a2a4f6d01 100644 --- a/src/librustc_incremental/persist/load.rs +++ b/src/librustc_incremental/persist/load.rs @@ -26,7 +26,7 @@ use super::data::*; use super::directory::*; use super::dirty_clean; use super::hash::*; -use super::util::*; +use super::fs::*; pub type DirtyNodes = FnvHashSet>; @@ -43,18 +43,36 @@ pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { return; } + match prepare_session_directory(tcx) { + Ok(true) => { + // We successfully allocated a session directory and there is + // something in it to load, so continue + } + Ok(false) => { + // We successfully allocated a session directory, but there is no + // dep-graph data in it to load (because this is the first + // compilation session with this incr. comp. dir.) + return + } + Err(()) => { + // Something went wrong while trying to allocate the session + // directory. Don't try to use it any further. 
+ return + } + } + let _ignore = tcx.dep_graph.in_ignore(); load_dep_graph_if_exists(tcx); } fn load_dep_graph_if_exists<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let dep_graph_path = dep_graph_path(tcx).unwrap(); + let dep_graph_path = dep_graph_path(tcx.sess); let dep_graph_data = match load_data(tcx.sess, &dep_graph_path) { Some(p) => p, None => return // no file }; - let work_products_path = tcx_work_products_path(tcx).unwrap(); + let work_products_path = work_products_path(tcx.sess); let work_products_data = match load_data(tcx.sess, &work_products_path) { Some(p) => p, None => return // no file @@ -250,7 +268,7 @@ fn reconcile_work_products<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .saved_files .iter() .all(|&(_, ref file_name)| { - let path = in_incr_comp_dir(tcx.sess, &file_name).unwrap(); + let path = in_incr_comp_dir_sess(tcx.sess, &file_name); path.exists() }); if all_files_exist { @@ -268,7 +286,7 @@ fn delete_dirty_work_product(tcx: TyCtxt, swp: SerializedWorkProduct) { debug!("delete_dirty_work_product({:?})", swp); for &(_, ref file_name) in &swp.work_product.saved_files { - let path = in_incr_comp_dir(tcx.sess, file_name).unwrap(); + let path = in_incr_comp_dir_sess(tcx.sess, file_name); match fs::remove_file(&path) { Ok(()) => { } Err(err) => { diff --git a/src/librustc_incremental/persist/mod.rs b/src/librustc_incremental/persist/mod.rs index 4a042497e0441..ba0f71971bb45 100644 --- a/src/librustc_incremental/persist/mod.rs +++ b/src/librustc_incremental/persist/mod.rs @@ -15,15 +15,16 @@ mod data; mod directory; mod dirty_clean; +mod fs; mod hash; mod load; mod preds; mod save; -mod util; mod work_product; +pub use self::fs::finalize_session_directory; +pub use self::fs::in_incr_comp_dir; pub use self::load::load_dep_graph; pub use self::save::save_dep_graph; pub use self::save::save_work_products; pub use self::work_product::save_trans_partition; -pub use self::util::in_incr_comp_dir; diff --git a/src/librustc_incremental/persist/save.rs 
b/src/librustc_incremental/persist/save.rs index a9523a81fbaf7..df362f8404317 100644 --- a/src/librustc_incremental/persist/save.rs +++ b/src/librustc_incremental/persist/save.rs @@ -11,7 +11,7 @@ use rbml::opaque::Encoder; use rustc::dep_graph::DepNode; use rustc::hir::def_id::DefId; -use rustc::middle::cstore::LOCAL_CRATE; +use rustc::hir::svh::Svh; use rustc::session::Session; use rustc::ty::TyCtxt; use rustc_data_structures::fnv::FnvHashMap; @@ -20,14 +20,14 @@ use std::hash::{Hash, Hasher, SipHasher}; use std::io::{self, Cursor, Write}; use std::fs::{self, File}; use std::path::PathBuf; - use super::data::*; use super::directory::*; use super::hash::*; use super::preds::*; -use super::util::*; +use super::fs::*; -pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { +pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + svh: Svh) { debug!("save_dep_graph()"); let _ignore = tcx.dep_graph.in_ignore(); let sess = tcx.sess; @@ -39,31 +39,31 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { let query = tcx.dep_graph.query(); let preds = Predecessors::new(&query, &mut hcx); save_in(sess, - dep_graph_path(tcx), + dep_graph_path(sess), |e| encode_dep_graph(&preds, &mut builder, e)); save_in(sess, - metadata_hash_path(tcx, LOCAL_CRATE), - |e| encode_metadata_hashes(tcx, &preds, &mut builder, e)); + metadata_hash_export_path(sess), + |e| encode_metadata_hashes(tcx, svh, &preds, &mut builder, e)); } -pub fn save_work_products(sess: &Session, local_crate_name: &str) { +pub fn save_work_products(sess: &Session) { + if sess.opts.incremental.is_none() { + return; + } + debug!("save_work_products()"); let _ignore = sess.dep_graph.in_ignore(); - let path = sess_work_products_path(sess, local_crate_name); + let path = work_products_path(sess); save_in(sess, path, |e| encode_work_products(sess, e)); } -fn save_in(sess: &Session, opt_path_buf: Option, encode: F) +fn save_in(sess: &Session, path_buf: PathBuf, encode: F) where F: FnOnce(&mut 
Encoder) -> io::Result<()> { - let path_buf = match opt_path_buf { - Some(p) => p, - None => return, - }; - - // FIXME(#32754) lock file? - // delete the old dep-graph, if any + // Note: It's important that we actually delete the old file and not just + // truncate and overwrite it, since it might be a shared hard-link, the + // underlying data of which we don't want to modify if path_buf.exists() { match fs::remove_file(&path_buf) { Ok(()) => {} @@ -153,6 +153,7 @@ pub fn encode_dep_graph(preds: &Predecessors, } pub fn encode_metadata_hashes(tcx: TyCtxt, + svh: Svh, preds: &Predecessors, builder: &mut DefIdDirectoryBuilder, encoder: &mut Encoder) @@ -218,6 +219,7 @@ pub fn encode_metadata_hashes(tcx: TyCtxt, } // Encode everything. + try!(svh.encode(encoder)); try!(serialized_hashes.encode(encoder)); Ok(()) diff --git a/src/librustc_incremental/persist/util.rs b/src/librustc_incremental/persist/util.rs deleted file mode 100644 index f1e81fdb266b9..0000000000000 --- a/src/librustc_incremental/persist/util.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use rustc::middle::cstore::LOCAL_CRATE; -use rustc::session::Session; -use rustc::ty::TyCtxt; - -use std::fs; -use std::io; -use std::path::{Path, PathBuf}; -use syntax::ast; - -pub fn dep_graph_path(tcx: TyCtxt) -> Option { - tcx_path(tcx, LOCAL_CRATE, "local") -} - -pub fn metadata_hash_path(tcx: TyCtxt, cnum: ast::CrateNum) -> Option { - tcx_path(tcx, cnum, "metadata") -} - -pub fn tcx_work_products_path(tcx: TyCtxt) -> Option { - let crate_name = tcx.crate_name(LOCAL_CRATE); - sess_work_products_path(tcx.sess, &crate_name) -} - -pub fn sess_work_products_path(sess: &Session, - local_crate_name: &str) - -> Option { - let crate_disambiguator = sess.local_crate_disambiguator(); - path(sess, local_crate_name, &crate_disambiguator, "work-products") -} - -pub fn in_incr_comp_dir(sess: &Session, file_name: &str) -> Option { - sess.opts.incremental.as_ref().map(|incr_dir| incr_dir.join(file_name)) -} - -fn tcx_path(tcx: TyCtxt, - cnum: ast::CrateNum, - middle: &str) - -> Option { - path(tcx.sess, &tcx.crate_name(cnum), &tcx.crate_disambiguator(cnum), middle) -} - -fn path(sess: &Session, - crate_name: &str, - crate_disambiguator: &str, - middle: &str) - -> Option { - // For now, just save/load dep-graph from - // directory/dep_graph.rbml - sess.opts.incremental.as_ref().and_then(|incr_dir| { - match create_dir_racy(&incr_dir) { - Ok(()) => {} - Err(err) => { - sess.err( - &format!("could not create the directory `{}`: {}", - incr_dir.display(), err)); - return None; - } - } - - let file_name = format!("{}-{}.{}.bin", crate_name, crate_disambiguator, middle); - - Some(incr_dir.join(file_name)) - }) -} - -// Like std::fs::create_dir_all, except handles concurrent calls among multiple -// threads or processes. 
-fn create_dir_racy(path: &Path) -> io::Result<()> { - match fs::create_dir(path) { - Ok(()) => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => return Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::NotFound => {} - Err(e) => return Err(e), - } - match path.parent() { - Some(p) => try!(create_dir_racy(p)), - None => return Err(io::Error::new(io::ErrorKind::Other, - "failed to create whole tree")), - } - match fs::create_dir(path) { - Ok(()) => Ok(()), - Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => Ok(()), - Err(e) => Err(e), - } -} - diff --git a/src/librustc_incremental/persist/work_product.rs b/src/librustc_incremental/persist/work_product.rs index c106ea8f26269..a9ebd27ce9928 100644 --- a/src/librustc_incremental/persist/work_product.rs +++ b/src/librustc_incremental/persist/work_product.rs @@ -10,7 +10,7 @@ //! This module contains files for saving intermediate work-products. -use persist::util::*; +use persist::fs::*; use rustc::dep_graph::{WorkProduct, WorkProductId}; use rustc::session::Session; use rustc::session::config::OutputType; @@ -35,7 +35,7 @@ pub fn save_trans_partition(sess: &Session, files.iter() .map(|&(kind, ref path)| { let file_name = format!("cgu-{}.{}", cgu_name, kind.extension()); - let path_in_incr_dir = in_incr_comp_dir(sess, &file_name).unwrap(); + let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name); match link_or_copy(path, &path_in_incr_dir) { Ok(_) => Some((kind, file_name)), Err(err) => { diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index b2ffcac365bad..754910c246d6f 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -1796,6 +1796,11 @@ extern { Col: c_uint) -> DILexicalBlock; + pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: DIBuilderRef, + Scope: DIScope, + File: DIFile) + -> DILexicalBlock; + pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: DIBuilderRef, Context: DIScope, Name: *const c_char, diff --git 
a/src/librustc_metadata/loader.rs b/src/librustc_metadata/loader.rs index 2345cd9a92aea..b2c87db8ef566 100644 --- a/src/librustc_metadata/loader.rs +++ b/src/librustc_metadata/loader.rs @@ -221,6 +221,7 @@ use rustc::session::Session; use rustc::session::filesearch::{FileSearch, FileMatches, FileDoesntMatch}; use rustc::session::search_paths::PathKind; use rustc::util::common; +use rustc::util::nodemap::FnvHashMap; use rustc_llvm as llvm; use rustc_llvm::{False, ObjectFile, mk_section_iter}; @@ -230,7 +231,6 @@ use syntax_pos::Span; use rustc_back::target::Target; use std::cmp; -use std::collections::HashMap; use std::fmt; use std::fs; use std::io; @@ -413,7 +413,7 @@ impl<'a> Context<'a> { let rlib_prefix = format!("lib{}", self.crate_name); let staticlib_prefix = format!("{}{}", staticpair.0, self.crate_name); - let mut candidates = HashMap::new(); + let mut candidates = FnvHashMap(); let mut staticlibs = vec!(); // First, find all possible candidate rlibs and dylibs purely based on @@ -456,7 +456,7 @@ impl<'a> Context<'a> { let hash_str = hash.to_string(); let slot = candidates.entry(hash_str) - .or_insert_with(|| (HashMap::new(), HashMap::new())); + .or_insert_with(|| (FnvHashMap(), FnvHashMap())); let (ref mut rlibs, ref mut dylibs) = *slot; fs::canonicalize(path).map(|p| { if rlib { @@ -477,7 +477,7 @@ impl<'a> Context<'a> { // A Library candidate is created if the metadata for the set of // libraries corresponds to the crate id and hash criteria that this // search is being performed for. - let mut libraries = HashMap::new(); + let mut libraries = FnvHashMap(); for (_hash, (rlibs, dylibs)) in candidates { let mut slot = None; let rlib = self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot); @@ -527,7 +527,7 @@ impl<'a> Context<'a> { // read the metadata from it if `*slot` is `None`. If the metadata couldn't // be read, it is assumed that the file isn't a valid rust library (no // errors are emitted). 
- fn extract_one(&mut self, m: HashMap, flavor: CrateFlavor, + fn extract_one(&mut self, m: FnvHashMap, flavor: CrateFlavor, slot: &mut Option<(Svh, MetadataBlob)>) -> Option<(PathBuf, PathKind)> { let mut ret: Option<(PathBuf, PathKind)> = None; let mut error = 0; @@ -669,8 +669,8 @@ impl<'a> Context<'a> { // rlibs/dylibs. let sess = self.sess; let dylibname = self.dylibname(); - let mut rlibs = HashMap::new(); - let mut dylibs = HashMap::new(); + let mut rlibs = FnvHashMap(); + let mut dylibs = FnvHashMap(); { let locs = locs.map(|l| PathBuf::from(l)).filter(|loc| { if !loc.exists() { diff --git a/src/librustc_metadata/macro_import.rs b/src/librustc_metadata/macro_import.rs index b2a2dcf90fa4b..2cd60f04e69a1 100644 --- a/src/librustc_metadata/macro_import.rs +++ b/src/librustc_metadata/macro_import.rs @@ -14,8 +14,8 @@ use creader::CrateReader; use cstore::CStore; use rustc::session::Session; +use rustc::util::nodemap::{FnvHashSet, FnvHashMap}; -use std::collections::{HashSet, HashMap}; use syntax::parse::token; use syntax::ast; use syntax::attr; @@ -45,13 +45,13 @@ pub fn call_bad_macro_reexport(a: &Session, b: Span) { span_err!(a, b, E0467, "bad macro reexport"); } -pub type MacroSelection = HashMap; +pub type MacroSelection = FnvHashMap; impl<'a> ext::base::MacroLoader for MacroLoader<'a> { fn load_crate(&mut self, extern_crate: &ast::Item, allows_macros: bool) -> Vec { // Parse the attributes relating to macros. 
- let mut import = Some(HashMap::new()); // None => load all - let mut reexport = HashMap::new(); + let mut import = Some(FnvHashMap()); // None => load all + let mut reexport = FnvHashMap(); for attr in &extern_crate.attrs { let mut used = true; @@ -120,7 +120,7 @@ impl<'a> MacroLoader<'a> { } let mut macros = Vec::new(); - let mut seen = HashSet::new(); + let mut seen = FnvHashSet(); for mut def in self.reader.read_exported_macros(vi) { let name = def.ident.name.as_str(); diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index 4a731d898a937..eb16812af9b02 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -18,7 +18,7 @@ for the entire lifetime of a program. Creating a boxed value allocates memory on the heap at runtime, and therefore cannot be done at compile time. Erroneous code example: -```compile_fail +```compile_fail,E0010 #![feature(box_syntax)] const CON : Box = box 0; @@ -30,7 +30,7 @@ Static and const variables can refer to other const variables. But a const variable cannot refer to a static variable. For example, `Y` cannot refer to `X` here: -```compile_fail +```compile_fail,E0013 static X: i32 = 42; const Y: i32 = X; ``` @@ -66,7 +66,7 @@ E0016: r##" Blocks in constants may only contain items (such as constant, function definition, etc...) and a tail expression. Erroneous code example: -```compile_fail +```compile_fail,E0016 const FOO: i32 = { let x = 0; x }; // 'x' isn't an item! ``` @@ -81,7 +81,7 @@ E0017: r##" References in statics and constants may only refer to immutable values. Erroneous code example: -```compile_fail +```compile_fail,E0017 static X: i32 = 1; const C: i32 = 2; @@ -107,7 +107,7 @@ vary. 
For example, if you write: -```compile_fail +```compile_fail,E0018 static MY_STATIC: u32 = 42; static MY_STATIC_ADDR: usize = &MY_STATIC as *const _ as usize; static WHAT: usize = (MY_STATIC_ADDR^17) + MY_STATIC_ADDR; @@ -152,7 +152,7 @@ impl Test { fn main() { const FOO: Test = Test::V1; - const A: i32 = FOO.test(); // You can't call Test::func() here ! + const A: i32 = FOO.test(); // You can't call Test::func() here! } ``` @@ -214,14 +214,13 @@ static B: &'static u32 = &A; // ok! ``` "##, - E0395: r##" The value assigned to a constant scalar must be known at compile time, which is not the case when comparing raw pointers. Erroneous code example: -```compile_fail +```compile_fail,E0395 static FOO: i32 = 42; static BAR: i32 = 42; @@ -250,7 +249,7 @@ The value behind a raw pointer can't be determined at compile-time (or even link-time), which means it can't be used in a constant expression. Erroneous code example: -```compile_fail +```compile_fail,E0396 const REG_ADDR: *const u8 = 0x5f3759df as *const u8; const VALUE: u8 = unsafe { *REG_ADDR }; @@ -272,7 +271,7 @@ E0492: r##" A borrow of a constant containing interior mutability was attempted. Erroneous code example: -```compile_fail +```compile_fail,E0492 use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; const A: AtomicUsize = ATOMIC_USIZE_INIT; @@ -299,7 +298,7 @@ static B: &'static AtomicUsize = &A; // ok! You can also have this error while using a cell type: -```compile_fail +```compile_fail,E0492 #![feature(const_fn)] use std::cell::Cell; @@ -351,7 +350,7 @@ E0493: r##" A type with a destructor was assigned to an invalid type of variable. Erroneous code example: -```compile_fail +```compile_fail,E0493 struct Foo { a: u32 } @@ -374,7 +373,7 @@ E0494: r##" A reference of an interior static was assigned to another const/static. 
Erroneous code example: -```compile_fail +```compile_fail,E0494 struct Foo { a: u32 } diff --git a/src/librustc_resolve/assign_ids.rs b/src/librustc_resolve/assign_ids.rs index 70e566de8a7be..a9e3c6ffe9ed8 100644 --- a/src/librustc_resolve/assign_ids.rs +++ b/src/librustc_resolve/assign_ids.rs @@ -10,6 +10,7 @@ use Resolver; use rustc::session::Session; +use rustc::util::nodemap::FnvHashMap; use syntax::ast; use syntax::ext::hygiene::Mark; use syntax::fold::{self, Folder}; @@ -17,7 +18,6 @@ use syntax::ptr::P; use syntax::util::move_map::MoveMap; use syntax::util::small_vector::SmallVector; -use std::collections::HashMap; use std::mem; impl<'a> Resolver<'a> { @@ -31,7 +31,7 @@ impl<'a> Resolver<'a> { struct NodeIdAssigner<'a> { sess: &'a Session, - macros_at_scope: &'a mut HashMap>, + macros_at_scope: &'a mut FnvHashMap>, } impl<'a> Folder for NodeIdAssigner<'a> { diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index 5641a50ccaccf..6a4a48377c783 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -68,7 +68,6 @@ use syntax::ast::{PathSegment, PathParameters, QSelf, TraitItemKind, TraitRef, T use syntax_pos::Span; use errors::DiagnosticBuilder; -use std::collections::{HashMap, HashSet}; use std::cell::{Cell, RefCell}; use std::fmt; use std::mem::replace; @@ -498,7 +497,7 @@ struct BindingInfo { } // Map from the name in a pattern to its binding mode. -type BindingMap = HashMap; +type BindingMap = FnvHashMap; #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum PatternSource { @@ -703,14 +702,14 @@ enum ModulePrefixResult<'a> { /// One local scope. #[derive(Debug)] struct Rib<'a> { - bindings: HashMap, + bindings: FnvHashMap, kind: RibKind<'a>, } impl<'a> Rib<'a> { fn new(kind: RibKind<'a>) -> Rib<'a> { Rib { - bindings: HashMap::new(), + bindings: FnvHashMap(), kind: kind, } } @@ -773,7 +772,7 @@ pub struct ModuleS<'a> { // is the NodeId of the local `extern crate` item (otherwise, `extern_crate_id` is None). 
extern_crate_id: Option, - resolutions: RefCell>>>, + resolutions: RefCell>>>, no_implicit_prelude: Cell, @@ -797,7 +796,7 @@ impl<'a> ModuleS<'a> { parent_link: parent_link, def: def, extern_crate_id: None, - resolutions: RefCell::new(HashMap::new()), + resolutions: RefCell::new(FnvHashMap()), no_implicit_prelude: Cell::new(false), glob_importers: RefCell::new(Vec::new()), globs: RefCell::new((Vec::new())), @@ -930,12 +929,12 @@ impl<'a> NameBinding<'a> { /// Interns the names of the primitive types. struct PrimitiveTypeTable { - primitive_types: HashMap, + primitive_types: FnvHashMap, } impl PrimitiveTypeTable { fn new() -> PrimitiveTypeTable { - let mut table = PrimitiveTypeTable { primitive_types: HashMap::new() }; + let mut table = PrimitiveTypeTable { primitive_types: FnvHashMap() }; table.intern("bool", TyBool); table.intern("char", TyChar); @@ -969,7 +968,7 @@ pub struct Resolver<'a> { // Maps the node id of a statement to the expansions of the `macro_rules!`s // immediately above the statement (if appropriate). - macros_at_scope: HashMap>, + macros_at_scope: FnvHashMap>, graph_root: Module<'a>, @@ -1043,8 +1042,8 @@ pub struct Resolver<'a> { // all imports, but only glob imports are actually interesting). pub glob_map: GlobMap, - used_imports: HashSet<(NodeId, Namespace)>, - used_crates: HashSet, + used_imports: FnvHashSet<(NodeId, Namespace)>, + used_crates: FnvHashSet, pub maybe_unused_trait_imports: NodeSet, privacy_errors: Vec>, @@ -1164,7 +1163,7 @@ impl<'a> Resolver<'a> { session: session, definitions: Definitions::new(), - macros_at_scope: HashMap::new(), + macros_at_scope: FnvHashMap(), // The outermost module has def ID 0; this is not reflected in the // AST. 
@@ -1199,8 +1198,8 @@ impl<'a> Resolver<'a> { make_glob_map: make_glob_map == MakeGlobMap::Yes, glob_map: NodeMap(), - used_imports: HashSet::new(), - used_crates: HashSet::new(), + used_imports: FnvHashSet(), + used_crates: FnvHashSet(), maybe_unused_trait_imports: NodeSet(), privacy_errors: Vec::new(), @@ -1729,7 +1728,7 @@ impl<'a> Resolver<'a> { match type_parameters { HasTypeParameters(generics, rib_kind) => { let mut function_type_rib = Rib::new(rib_kind); - let mut seen_bindings = HashMap::new(); + let mut seen_bindings = FnvHashMap(); for type_parameter in &generics.ty_params { let name = type_parameter.ident.name; debug!("with_type_parameter_rib: {}", type_parameter.id); @@ -1793,7 +1792,7 @@ impl<'a> Resolver<'a> { self.label_ribs.push(Rib::new(rib_kind)); // Add each argument to the rib. - let mut bindings_list = HashMap::new(); + let mut bindings_list = FnvHashMap(); for argument in &declaration.inputs { self.resolve_pattern(&argument.pat, PatternSource::FnParam, &mut bindings_list); @@ -1994,7 +1993,7 @@ impl<'a> Resolver<'a> { walk_list!(self, visit_expr, &local.init); // Resolve the pattern. - self.resolve_pattern(&local.pat, PatternSource::Let, &mut HashMap::new()); + self.resolve_pattern(&local.pat, PatternSource::Let, &mut FnvHashMap()); } // build a map from pattern identifiers to binding-info's. @@ -2002,7 +2001,7 @@ impl<'a> Resolver<'a> { // that expands into an or-pattern where one 'x' was from the // user and one 'x' came from the macro. 
fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap { - let mut binding_map = HashMap::new(); + let mut binding_map = FnvHashMap(); pat.walk(&mut |pat| { if let PatKind::Ident(binding_mode, ident, ref sub_pat) = pat.node { @@ -2062,7 +2061,7 @@ impl<'a> Resolver<'a> { fn resolve_arm(&mut self, arm: &Arm) { self.value_ribs.push(Rib::new(NormalRibKind)); - let mut bindings_list = HashMap::new(); + let mut bindings_list = FnvHashMap(); for pattern in &arm.pats { self.resolve_pattern(&pattern, PatternSource::Match, &mut bindings_list); } @@ -2202,7 +2201,7 @@ impl<'a> Resolver<'a> { pat_id: NodeId, outer_pat_id: NodeId, pat_src: PatternSource, - bindings: &mut HashMap) + bindings: &mut FnvHashMap) -> PathResolution { // Add the binding to the local ribs, if it // doesn't already exist in the bindings map. (We @@ -2303,7 +2302,7 @@ impl<'a> Resolver<'a> { pat_src: PatternSource, // Maps idents to the node ID for the // outermost pattern that binds them. - bindings: &mut HashMap) { + bindings: &mut FnvHashMap) { // Visit all direct subpatterns of this pattern. 
let outer_pat_id = pat.id; pat.walk(&mut |pat| { @@ -3016,7 +3015,7 @@ impl<'a> Resolver<'a> { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::IfLet, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::IfLet, &mut FnvHashMap()); self.visit_block(if_block); self.value_ribs.pop(); @@ -3026,7 +3025,7 @@ impl<'a> Resolver<'a> { ExprKind::WhileLet(ref pattern, ref subexpression, ref block, label) => { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::WhileLet, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::WhileLet, &mut FnvHashMap()); self.resolve_labeled_block(label.map(|l| l.node), expr.id, block); @@ -3036,7 +3035,7 @@ impl<'a> Resolver<'a> { ExprKind::ForLoop(ref pattern, ref subexpression, ref block, label) => { self.visit_expr(subexpression); self.value_ribs.push(Rib::new(NormalRibKind)); - self.resolve_pattern(pattern, PatternSource::For, &mut HashMap::new()); + self.resolve_pattern(pattern, PatternSource::For, &mut FnvHashMap()); self.resolve_labeled_block(label.map(|l| l.node), expr.id, block); @@ -3297,7 +3296,7 @@ impl<'a> Resolver<'a> { fn report_privacy_errors(&self) { if self.privacy_errors.len() == 0 { return } - let mut reported_spans = HashSet::new(); + let mut reported_spans = FnvHashSet(); for &PrivacyError(span, name, binding) in &self.privacy_errors { if !reported_spans.insert(span) { continue } if binding.is_extern_crate() { diff --git a/src/librustc_trans/back/write.rs b/src/librustc_trans/back/write.rs index 4b9c29d3d7db3..081b4431bd7b8 100644 --- a/src/librustc_trans/back/write.rs +++ b/src/librustc_trans/back/write.rs @@ -10,7 +10,7 @@ use back::lto; use back::link::{get_linker, remove}; -use rustc_incremental::save_trans_partition; +use rustc_incremental::{save_trans_partition, in_incr_comp_dir}; use session::config::{OutputFilenames, OutputTypes, 
Passes, SomePasses, AllPasses}; use session::Session; use session::config::{self, OutputType}; @@ -328,8 +328,9 @@ struct CodegenContext<'a> { remark: Passes, // Worker thread number worker: usize, - // Directory where incremental data is stored (if any) - incremental: Option, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + incr_comp_session_dir: Option } impl<'a> CodegenContext<'a> { @@ -340,7 +341,7 @@ impl<'a> CodegenContext<'a> { plugin_passes: sess.plugin_llvm_passes.borrow().clone(), remark: sess.opts.cg.remark.clone(), worker: 0, - incremental: sess.opts.incremental.clone(), + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()) } } } @@ -962,17 +963,20 @@ fn execute_work_item(cgcx: &CodegenContext, work_item.output_names); } ModuleSource::Preexisting(wp) => { - let incremental = cgcx.incremental.as_ref().unwrap(); + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); let name = &work_item.mtrans.name; for (kind, saved_file) in wp.saved_files { let obj_out = work_item.output_names.temp_path(kind, Some(name)); - let source_file = incremental.join(&saved_file); + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); debug!("copying pre-existing module `{}` from {:?} to {}", work_item.mtrans.name, source_file, obj_out.display()); match link_or_copy(&source_file, &obj_out) { - Ok(()) => { } + Ok(_) => { } Err(err) => { cgcx.handler.err(&format!("unable to copy {} to {}: {}", source_file.display(), @@ -1018,7 +1022,7 @@ fn run_work_multithreaded(sess: &Session, let mut tx = Some(tx); futures.push(rx); - let incremental = sess.opts.incremental.clone(); + let incr_comp_session_dir = sess.incr_comp_session_dir_opt().map(|r| r.clone()); thread::Builder::new().name(format!("codegen-{}", i)).spawn(move || { let diag_handler = Handler::with_emitter(true, false, box diag_emitter); @@ -1031,7 +1035,7 @@ fn run_work_multithreaded(sess: &Session, 
plugin_passes: plugin_passes, remark: remark, worker: i, - incremental: incremental, + incr_comp_session_dir: incr_comp_session_dir }; loop { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index f190fbeb6feb9..5d7e2b21e9ee7 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -92,14 +92,13 @@ use value::Value; use Disr; use util::common::indenter; use util::sha2::Sha256; -use util::nodemap::{NodeMap, NodeSet, FnvHashSet}; +use util::nodemap::{NodeMap, NodeSet, FnvHashMap, FnvHashSet}; use arena::TypedArena; use libc::c_uint; use std::ffi::{CStr, CString}; use std::borrow::Cow; use std::cell::{Cell, RefCell}; -use std::collections::HashMap; use std::ptr; use std::rc::Rc; use std::str; @@ -1531,7 +1530,7 @@ impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { let fragment_infos = tcx.fragment_infos.borrow(); // Intern table for drop-flag hint datums. - let mut seen = HashMap::new(); + let mut seen = FnvHashMap(); let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did)); if let Some(fragment_infos) = fragment_infos { @@ -2801,7 +2800,7 @@ fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a } if scx.sess().opts.debugging_opts.print_trans_items.is_some() { - let mut item_to_cgus = HashMap::new(); + let mut item_to_cgus = FnvHashMap(); for cgu in &codegen_units { for (&trans_item, &linkage) in cgu.items() { diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index fe6a48d4c559d..0e77c107e21e1 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -29,6 +29,8 @@ use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::hir::{self, PatKind}; +use syntax_pos::BytePos; + // This procedure builds the *scope map* for a given function, which maps any // given ast::NodeId in the function's AST to the correct DIScope 
metadata instance. // @@ -68,11 +70,31 @@ pub fn create_scope_map(cx: &CrateContext, return scope_map; } +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: DIScope, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_null() + } +} + /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. -pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { +pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { let mir = fcx.mir.clone().expect("create_mir_scopes: missing MIR for fn"); - let mut scopes = IndexVec::from_elem(ptr::null_mut(), &mir.visibility_scopes); + let null_scope = MirDebugScope { + scope_metadata: ptr::null_mut(), + file_start_pos: BytePos(0), + file_end_pos: BytePos(0) + }; + let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes); let fn_metadata = match fcx.debug_context { FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata, @@ -102,8 +124,8 @@ fn make_mir_scope(ccx: &CrateContext, has_variables: &BitVector, fn_metadata: DISubprogram, scope: VisibilityScope, - scopes: &mut IndexVec) { - if !scopes[scope].is_null() { + scopes: &mut IndexVec) { + if scopes[scope].is_valid() { return; } @@ -113,7 +135,12 @@ fn make_mir_scope(ccx: &CrateContext, scopes[parent] } else { // The root is the function itself. 
- scopes[scope] = fn_metadata; + let loc = span_start(ccx, mir.span); + scopes[scope] = MirDebugScope { + scope_metadata: fn_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; return; }; @@ -124,22 +151,27 @@ fn make_mir_scope(ccx: &CrateContext, // However, we don't skip creating a nested scope if // our parent is the root, because we might want to // put arguments in the root and not have shadowing. - if parent_scope != fn_metadata { + if parent_scope.scope_metadata != fn_metadata { scopes[scope] = parent_scope; return; } } let loc = span_start(ccx, scope_data.span); - scopes[scope] = unsafe { let file_metadata = file_metadata(ccx, &loc.file.name, &loc.file.abs_path); + let scope_metadata = unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlock( DIB(ccx), - parent_scope, + parent_scope.scope_metadata, file_metadata, loc.line as c_uint, loc.col.to_usize() as c_uint) }; + scopes[scope] = MirDebugScope { + scope_metadata: scope_metadata, + file_start_pos: loc.file.start_pos, + file_end_pos: loc.file.end_pos, + }; } // local helper functions for walking the AST. diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index f505efb1ab2f9..8cbb659a77e38 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -23,7 +23,7 @@ use context::SharedCrateContext; use session::Session; use llvm::{self, ValueRef}; -use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType}; +use llvm::debuginfo::{DIType, DIFile, DIScope, DIDescriptor, DICompositeType, DILexicalBlock}; use rustc::hir::def_id::DefId; use rustc::hir::pat_util; @@ -2086,3 +2086,17 @@ pub fn create_argument_metadata(bcx: Block, arg: &hir::Arg) { span); }) } + +// Creates an "extension" of an existing DIScope into another file. 
+pub fn extend_scope_to_file(ccx: &CrateContext, + scope_metadata: DIScope, + file: &syntax_pos::FileMap) + -> DILexicalBlock { + let file_metadata = file_metadata(ccx, &file.name, &file.abs_path); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(ccx), + scope_metadata, + file_metadata) + } +} diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 1ee000992b9c5..66017e74e13fc 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -54,7 +54,7 @@ pub mod metadata; mod create_scope_map; mod source_loc; -pub use self::create_scope_map::create_mir_scopes; +pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; pub use self::source_loc::start_emitting_source_locations; pub use self::source_loc::get_cleanup_debug_loc_for_ast_node; pub use self::source_loc::with_source_location_override; @@ -63,6 +63,7 @@ pub use self::metadata::create_argument_metadata; pub use self::metadata::create_captured_var_metadata; pub use self::metadata::create_global_var_metadata; pub use self::metadata::create_local_var_metadata; +pub use self::metadata::extend_scope_to_file; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index e0d959f4774a6..66eb78aef07b4 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -48,6 +48,12 @@ pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>, common::type_is_fat_ptr(bcx.tcx(), ty)); } else if common::type_is_imm_pair(bcx.ccx(), ty) { // We allow pairs and uses of any of their 2 fields. + } else if !analyzer.seen_assigned.contains(index) { + // No assignment has been seen, which means that + // either the local has been marked as lvalue + // already, or there is no possible initialization + // for the local, making any reads invalid. + // This is useful in weeding out dead temps. 
} else { // These sorts of types require an alloca. Note that // type_is_immediate() may *still* be true, particularly diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 727b680541dd7..8d4e28aadc65d 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -10,18 +10,17 @@ use libc::c_uint; use llvm::{self, ValueRef}; -use llvm::debuginfo::DIScope; use rustc::ty; use rustc::mir::repr as mir; use rustc::mir::tcx::LvalueTy; use session::config::FullDebugInfo; use base; use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null}; -use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind}; +use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext}; use machine; use type_of; -use syntax_pos::DUMMY_SP; +use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; use syntax::parse::token::keywords; use std::ops::Deref; @@ -103,12 +102,67 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec + scopes: IndexVec, } impl<'blk, 'tcx> MirContext<'blk, 'tcx> { - pub fn debug_loc(&self, source_info: mir::SourceInfo) -> DebugLoc { - DebugLoc::ScopeAt(self.scopes[source_info.scope], source_info.span) + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { + // Bail out if debug info emission is not enabled. + match self.fcx.debug_context { + FunctionDebugContext::DebugInfoDisabled | + FunctionDebugContext::FunctionWithoutDebugInfo => { + // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call() + // relies on debug location to obtain span of the call site. 
+ return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata, + source_info.span); + } + FunctionDebugContext::RegularContext(_) =>{} + } + + // In order to have a good line stepping behavior in debugger, we overwrite debug + // locations of macro expansions with that of the outermost expansion site + // (unless the crate is being compiled with `-Z debug-macros`). + if source_info.span.expn_id == NO_EXPANSION || + source_info.span.expn_id == COMMAND_LINE_EXPN || + self.fcx.ccx.sess().opts.debugging_opts.debug_macros { + + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, + source_info.span.lo); + DebugLoc::ScopeAt(scope_metadata, source_info.span) + } else { + let cm = self.fcx.ccx.sess().codemap(); + // Walk up the macro expansion chain until we reach a non-expanded span. + let mut span = source_info.span; + while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN { + if let Some(callsite_span) = cm.with_expn_info(span.expn_id, + |ei| ei.map(|ei| ei.call_site.clone())) { + span = callsite_span; + } else { + break; + } + } + let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo); + // Use span of the outermost call site, while keeping the original lexical scope + DebugLoc::ScopeAt(scope_metadata, span) + } + } + + // DILocations inherit source file name from the parent DIScope. Due to macro expansions + // it may so happen that the current span belongs to a different file than the DIScope + // corresponding to span's containing visibility scope. If so, we need to create a DIScope + // "extension" into that file. 
+ fn scope_metadata_for_loc(&self, scope_id: mir::VisibilityScope, pos: BytePos) + -> llvm::debuginfo::DIScope { + let scope_metadata = self.scopes[scope_id].scope_metadata; + if pos < self.scopes[scope_id].file_start_pos || + pos >= self.scopes[scope_id].file_end_pos { + let cm = self.fcx.ccx.sess().codemap(); + debuginfo::extend_scope_to_file(self.fcx.ccx, + scope_metadata, + &cm.lookup_char_pos(pos).file) + } else { + scope_metadata + } } } @@ -155,16 +209,38 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { analyze::cleanup_kinds(bcx, &mir)) }); + // Allocate a `Block` for every basic block + let block_bcxs: IndexVec> = + mir.basic_blocks().indices().map(|bb| { + if bb == mir::START_BLOCK { + fcx.new_block("start", None) + } else { + fcx.new_block(&format!("{:?}", bb), None) + } + }).collect(); + // Compute debuginfo scopes from MIR scopes. let scopes = debuginfo::create_mir_scopes(fcx); + let mut mircx = MirContext { + mir: mir.clone(), + fcx: fcx, + llpersonalityslot: None, + blocks: block_bcxs, + unreachable_block: None, + cleanup_kinds: cleanup_kinds, + landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), + scopes: scopes, + locals: IndexVec::new(), + }; + // Allocate variable and temp allocas - let locals = { - let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals); + mircx.locals = { + let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals); let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| { let ty = bcx.monomorphize(&decl.ty); - let scope = scopes[decl.source_info.scope]; - let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo; + let debug_scope = mircx.scopes[decl.source_info.scope]; + let dbg = debug_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo; let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap(); if !lvalue_locals.contains(local.index()) && !dbg { @@ -173,11 +249,16 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk 
FunctionContext<'blk, 'tcx>) { let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str()); if dbg { - bcx.with_block(|bcx| { - declare_local(bcx, decl.name, ty, scope, - VariableAccess::DirectVariable { alloca: lvalue.llval }, - VariableKind::LocalVariable, decl.source_info.span); - }); + let dbg_loc = mircx.debug_loc(decl.source_info); + if let DebugLoc::ScopeAt(scope, span) = dbg_loc { + bcx.with_block(|bcx| { + declare_local(bcx, decl.name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); + }); + } else { + panic!("Unexpected"); + } } LocalRef::Lvalue(lvalue) }); @@ -203,18 +284,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { })).collect() }; - // Allocate a `Block` for every basic block - let block_bcxs: IndexVec> = - mir.basic_blocks().indices().map(|bb| { - if bb == mir::START_BLOCK { - fcx.new_block("start", None) - } else { - fcx.new_block(&format!("{:?}", bb), None) - } - }).collect(); - // Branch to the START block - let start_bcx = block_bcxs[mir::START_BLOCK]; + let start_bcx = mircx.blocks[mir::START_BLOCK]; bcx.br(start_bcx.llbb); // Up until here, IR instructions for this function have explicitly not been annotated with @@ -222,18 +293,6 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { // emitting should be enabled. debuginfo::start_emitting_source_locations(fcx); - let mut mircx = MirContext { - mir: mir.clone(), - fcx: fcx, - llpersonalityslot: None, - blocks: block_bcxs, - unreachable_block: None, - cleanup_kinds: cleanup_kinds, - landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), - locals: locals, - scopes: scopes - }; - let mut visited = BitVector::new(mir.basic_blocks().len()); let mut rpo = traversal::reverse_postorder(&mir); @@ -271,7 +330,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { /// indirect. 
fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, mir: &mir::Mir<'tcx>, - scopes: &IndexVec, + scopes: &IndexVec, lvalue_locals: &BitVector) -> Vec> { let fcx = bcx.fcx(); @@ -281,8 +340,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // Get the argument scope, if it exists and if we need it. let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; - let arg_scope = if !arg_scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo { - Some(arg_scope) + let arg_scope = if arg_scope.is_valid() && bcx.sess().opts.debuginfo == FullDebugInfo { + Some(arg_scope.scope_metadata) } else { None }; diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index 0e42990a337d4..78476e814006f 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -469,10 +469,11 @@ pub fn compare_impl_method<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // are zero. Since I don't quite know how to phrase things at // the moment, give a kind of vague error message. 
if trait_params.len() != impl_params.len() { - span_err!(ccx.tcx.sess, span, E0195, + struct_span_err!(ccx.tcx.sess, span, E0195, "lifetime parameters or bounds on method `{}` do \ - not match the trait declaration", - impl_m.name); + not match the trait declaration",impl_m.name) + .span_label(span, &format!("lifetimes do not match trait")) + .emit(); return false; } diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index e6da03a903f21..3e68232020655 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -16,9 +16,9 @@ use rustc::infer::TypeOrigin; use rustc::ty::subst::Substs; use rustc::ty::FnSig; use rustc::ty::{self, Ty}; +use rustc::util::nodemap::FnvHashMap; use {CrateCtxt, require_same_types}; -use std::collections::{HashMap}; use syntax::abi::Abi; use syntax::ast; use syntax::parse::token; @@ -365,7 +365,7 @@ pub fn check_platform_intrinsic_type(ccx: &CrateCtxt, return } - let mut structural_to_nomimal = HashMap::new(); + let mut structural_to_nomimal = FnvHashMap(); let sig = tcx.no_late_bound_regions(i_ty.ty.fn_sig()).unwrap(); if intr.inputs.len() != sig.inputs.len() { @@ -405,7 +405,7 @@ fn match_intrinsic_type_to_type<'tcx, 'a>( ccx: &CrateCtxt<'a, 'tcx>, position: &str, span: Span, - structural_to_nominal: &mut HashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, + structural_to_nominal: &mut FnvHashMap<&'a intrinsics::Type, ty::Ty<'tcx>>, expected: &'a intrinsics::Type, t: ty::Ty<'tcx>) { use intrinsics::Type::*; diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index c306463ec1de0..4a1473d430610 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -20,10 +20,10 @@ use rustc::ty::subst::{Subst, Substs}; use rustc::traits; use rustc::ty::{self, Ty, ToPolyTraitRef, TraitRef, TypeFoldable}; use rustc::infer::{InferOk, TypeOrigin}; +use rustc::util::nodemap::FnvHashSet; use 
syntax::ast; use syntax_pos::{Span, DUMMY_SP}; use rustc::hir; -use std::collections::HashSet; use std::mem; use std::ops::Deref; use std::rc::Rc; @@ -40,7 +40,7 @@ struct ProbeContext<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { opt_simplified_steps: Option>, inherent_candidates: Vec>, extension_candidates: Vec>, - impl_dups: HashSet, + impl_dups: FnvHashSet, import_id: Option, /// Collects near misses when the candidate functions are missing a `self` keyword and is only @@ -255,7 +255,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { item_name: item_name, inherent_candidates: Vec::new(), extension_candidates: Vec::new(), - impl_dups: HashSet::new(), + impl_dups: FnvHashSet(), import_id: None, steps: Rc::new(steps), opt_simplified_steps: opt_simplified_steps, @@ -574,7 +574,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { expr_id: ast::NodeId) -> Result<(), MethodError<'tcx>> { - let mut duplicates = HashSet::new(); + let mut duplicates = FnvHashSet(); let opt_applicable_traits = self.tcx.trait_map.get(&expr_id); if let Some(applicable_traits) = opt_applicable_traits { for trait_candidate in applicable_traits { @@ -591,7 +591,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_extension_candidates_for_all_traits(&mut self) -> Result<(), MethodError<'tcx>> { - let mut duplicates = HashSet::new(); + let mut duplicates = FnvHashSet(); for trait_info in suggest::all_traits(self.ccx) { if duplicates.insert(trait_info.def_id) { self.assemble_extension_candidates_for_trait(trait_info.def_id)?; diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 3d51da02b874d..4cbad6d796c36 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -104,10 +104,9 @@ use CrateCtxt; use TypeAndSubsts; use lint; use util::common::{block_query, ErrorReported, indenter, loop_query}; -use util::nodemap::{DefIdMap, FnvHashMap, NodeMap}; +use util::nodemap::{DefIdMap, FnvHashMap, FnvHashSet, NodeMap}; use 
std::cell::{Cell, Ref, RefCell}; -use std::collections::{HashSet}; use std::mem::replace; use std::ops::Deref; use syntax::abi::Abi; @@ -708,7 +707,13 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, inherited.tables.borrow_mut().liberated_fn_sigs.insert(fn_id, fn_sig); - fcx.check_block_with_expected(body, ExpectHasType(fcx.ret_ty)); + // FIXME(aburka) do we need this special case? and should it be is_uninhabited? + let expected = if fcx.ret_ty.is_never() { + NoExpectation + } else { + ExpectHasType(fcx.ret_ty) + }; + fcx.check_block_with_expected(body, expected); fcx } @@ -2039,7 +2044,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .filter_map(|t| self.default(t).map(|d| (t, d))) .collect(); - let mut unbound_tyvars = HashSet::new(); + let mut unbound_tyvars = FnvHashSet(); debug!("select_all_obligations_and_apply_defaults: defaults={:?}", default_map); @@ -2186,7 +2191,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // table then apply defaults until we find a conflict. That default must be the one // that caused conflict earlier. 
fn find_conflicting_default(&self, - unbound_vars: &HashSet>, + unbound_vars: &FnvHashSet>, default_map: &FnvHashMap<&Ty<'tcx>, type_variable::Default<'tcx>>, conflict: Ty<'tcx>) -> Option> { diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index bcad7dd3bd0fa..44f840dee8bac 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -16,8 +16,8 @@ use middle::region::{CodeExtent}; use rustc::infer::TypeOrigin; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::util::nodemap::FnvHashSet; -use std::collections::HashSet; use syntax::ast; use syntax_pos::Span; use errors::DiagnosticBuilder; @@ -456,7 +456,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { assert_eq!(ty_predicates.parent, None); let variances = self.tcx().item_variances(item_def_id); - let mut constrained_parameters: HashSet<_> = + let mut constrained_parameters: FnvHashSet<_> = variances.types .iter().enumerate() .filter(|&(_, &variance)| variance != ty::Bivariant) @@ -519,7 +519,7 @@ impl<'ccx, 'gcx> CheckTypeWellFormedVisitor<'ccx, 'gcx> { fn reject_shadowing_type_parameters(tcx: TyCtxt, span: Span, generics: &ty::Generics) { let parent = tcx.lookup_generics(generics.parent.unwrap()); - let impl_params: HashSet<_> = parent.types.iter().map(|tp| tp.name).collect(); + let impl_params: FnvHashSet<_> = parent.types.iter().map(|tp| tp.name).collect(); for method_param in &generics.types { if impl_params.contains(&method_param.name) { diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index 7e1fb32881d6f..f6df62f87e1d8 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -73,13 +73,12 @@ use rustc::ty::util::IntTypeExt; use rscope::*; use rustc::dep_graph::DepNode; use util::common::{ErrorReported, MemoizationMap}; -use util::nodemap::{NodeMap, FnvHashMap}; +use util::nodemap::{NodeMap, FnvHashMap, FnvHashSet}; use {CrateCtxt, write_ty_to_tcx}; use 
rustc_const_math::ConstInt; use std::cell::RefCell; -use std::collections::HashSet; use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::rc::Rc; @@ -1923,9 +1922,9 @@ fn compute_object_lifetime_default<'a,'tcx>(ccx: &CrateCtxt<'a,'tcx>, { let inline_bounds = from_bounds(ccx, param_bounds); let where_bounds = from_predicates(ccx, param_id, &where_clause.predicates); - let all_bounds: HashSet<_> = inline_bounds.into_iter() - .chain(where_bounds) - .collect(); + let all_bounds: FnvHashSet<_> = inline_bounds.into_iter() + .chain(where_bounds) + .collect(); return if all_bounds.len() > 1 { ty::ObjectLifetimeDefault::Ambiguous } else if all_bounds.len() == 0 { @@ -2142,7 +2141,7 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, // The trait reference is an input, so find all type parameters // reachable from there, to start (if this is an inherent impl, // then just examine the self type). - let mut input_parameters: HashSet<_> = + let mut input_parameters: FnvHashSet<_> = ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect(); if let Some(ref trait_ref) = impl_trait_ref { input_parameters.extend(ctp::parameters_for(trait_ref, false)); @@ -2171,7 +2170,7 @@ fn enforce_impl_lifetimes_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, let impl_predicates = ccx.tcx.lookup_predicates(impl_def_id); let impl_trait_ref = ccx.tcx.impl_trait_ref(impl_def_id); - let mut input_parameters: HashSet<_> = + let mut input_parameters: FnvHashSet<_> = ctp::parameters_for(&impl_scheme.ty, false).into_iter().collect(); if let Some(ref trait_ref) = impl_trait_ref { input_parameters.extend(ctp::parameters_for(trait_ref, false)); @@ -2179,7 +2178,7 @@ fn enforce_impl_lifetimes_are_constrained<'a, 'tcx>(ccx: &CrateCtxt<'a, 'tcx>, ctp::identify_constrained_type_params( &impl_predicates.predicates.as_slice(), impl_trait_ref, &mut input_parameters); - let lifetimes_in_associated_types: HashSet<_> = impl_items.iter() + let 
lifetimes_in_associated_types: FnvHashSet<_> = impl_items.iter() .map(|item| ccx.tcx.impl_or_trait_item(ccx.tcx.map.local_def_id(item.id))) .filter_map(|item| match item { ty::TypeTraitItem(ref assoc_ty) => assoc_ty.ty, diff --git a/src/librustc_typeck/constrained_type_params.rs b/src/librustc_typeck/constrained_type_params.rs index 7d3bd095a3a88..8682a5474fb55 100644 --- a/src/librustc_typeck/constrained_type_params.rs +++ b/src/librustc_typeck/constrained_type_params.rs @@ -10,7 +10,7 @@ use rustc::ty::{self, Ty}; use rustc::ty::fold::{TypeFoldable, TypeVisitor}; -use std::collections::HashSet; +use rustc::util::nodemap::FnvHashSet; #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum Parameter { @@ -71,7 +71,7 @@ impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FnvHashSet) { let mut predicates = predicates.to_owned(); setup_constraining_predicates(&mut predicates, impl_trait_ref, input_parameters); @@ -120,7 +120,7 @@ pub fn identify_constrained_type_params<'tcx>(predicates: &[ty::Predicate<'tcx>] /// think of any. 
pub fn setup_constraining_predicates<'tcx>(predicates: &mut [ty::Predicate<'tcx>], impl_trait_ref: Option>, - input_parameters: &mut HashSet) + input_parameters: &mut FnvHashSet) { // The canonical way of doing the needed topological sort // would be a DFS, but getting the graph and its ownership diff --git a/src/librustc_unicode/char.rs b/src/librustc_unicode/char.rs index 81856cb87c7c4..7b6fcb5ade8bc 100644 --- a/src/librustc_unicode/char.rs +++ b/src/librustc_unicode/char.rs @@ -39,6 +39,8 @@ pub use core::char::{MAX, from_digit, from_u32, from_u32_unchecked}; pub use core::char::{EncodeUtf16, EncodeUtf8, EscapeDebug, EscapeDefault, EscapeUnicode}; // unstable reexports +#[unstable(feature = "try_from", issue = "33417")] +pub use core::char::CharTryFromError; #[unstable(feature = "decode_utf8", issue = "33906")] pub use core::char::{DecodeUtf8, decode_utf8}; #[unstable(feature = "unicode", issue = "27783")] diff --git a/src/librustc_unicode/lib.rs b/src/librustc_unicode/lib.rs index 3ae905eba279b..cb0203d3d7e49 100644 --- a/src/librustc_unicode/lib.rs +++ b/src/librustc_unicode/lib.rs @@ -37,6 +37,7 @@ #![feature(decode_utf8)] #![feature(lang_items)] #![feature(staged_api)] +#![feature(try_from)] #![feature(unicode)] mod tables; diff --git a/src/librustdoc/html/render.rs b/src/librustdoc/html/render.rs index e02cfb96dddf1..f9e5f1d89a8ee 100644 --- a/src/librustdoc/html/render.rs +++ b/src/librustdoc/html/render.rs @@ -61,6 +61,7 @@ use rustc::middle::privacy::AccessLevels; use rustc::middle::stability; use rustc::session::config::get_unstable_features_setting; use rustc::hir; +use rustc_data_structures::flock; use clean::{self, Attributes, GetDefId}; use doctree; @@ -650,7 +651,7 @@ fn write_shared(cx: &Context, // docs placed in the output directory, so this needs to be a synchronized // operation with respect to all other rustdocs running around. 
try_err!(mkdir(&cx.dst), &cx.dst); - let _lock = ::flock::Lock::new(&cx.dst.join(".lock")); + let _lock = flock::Lock::panicking_new(&cx.dst.join(".lock"), true, true, true); // Add all the static files. These may already exist, but we just // overwrite them anyway to make sure that they're fresh and up-to-date. diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs index 255e6b1e786df..0e685f063bd7b 100644 --- a/src/librustdoc/lib.rs +++ b/src/librustdoc/lib.rs @@ -35,6 +35,7 @@ extern crate libc; extern crate rustc; extern crate rustc_const_eval; extern crate rustc_const_math; +extern crate rustc_data_structures; extern crate rustc_trans; extern crate rustc_driver; extern crate rustc_resolve; @@ -86,7 +87,6 @@ pub mod plugins; pub mod visit_ast; pub mod visit_lib; pub mod test; -mod flock; use clean::Attributes; diff --git a/src/libstd/fs.rs b/src/libstd/fs.rs index b78db24e44b70..654e93144808f 100644 --- a/src/libstd/fs.rs +++ b/src/libstd/fs.rs @@ -348,6 +348,30 @@ impl File { inner: self.inner.duplicate()? }) } + + /// Reads a number of bytes starting from a given offset. + /// + /// The offset is relative to the file start and thus independent from the + /// current cursor. + /// + /// Note that similar to `File::read`, it is not an error to return a short + /// read. + #[unstable(feature = "file_offset", issue = "35918")] + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result { + self.inner.read_at(buf, offset) + } + + /// Writes a number of bytes starting from a given offset. + /// + /// The offset is relative to the file start and thus independent from the + /// current cursor. + /// + /// Note that similar to `File::write`, it is not an error to return a + /// short write. 
+ #[unstable(feature = "file_offset", issue = "35918")] + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result { + self.inner.write_at(buf, offset) + } } impl AsInner for File { @@ -1893,6 +1917,53 @@ mod tests { check!(fs::remove_file(filename)); } + #[test] + fn file_test_io_read_write_at() { + let tmpdir = tmpdir(); + let filename = tmpdir.join("file_rt_io_file_test_read_write_at.txt"); + let mut buf = [0; 256]; + let write1 = "asdf"; + let write2 = "qwer-"; + let write3 = "-zxcv"; + let content = "qwer-asdf-zxcv"; + { + let oo = OpenOptions::new().create_new(true).write(true).read(true).clone(); + let mut rw = check!(oo.open(&filename)); + assert_eq!(check!(rw.write_at(write1.as_bytes(), 5)), write1.len()); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 0); + assert_eq!(check!(rw.read_at(&mut buf, 5)), write1.len()); + assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1)); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 0); + assert_eq!(check!(rw.write(write2.as_bytes())), write2.len()); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 5); + assert_eq!(check!(rw.read(&mut buf)), write1.len()); + assert_eq!(str::from_utf8(&buf[..write1.len()]), Ok(write1)); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9); + assert_eq!(check!(rw.read_at(&mut buf[..write2.len()], 0)), write2.len()); + assert_eq!(str::from_utf8(&buf[..write2.len()]), Ok(write2)); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9); + assert_eq!(check!(rw.write_at(write3.as_bytes(), 9)), write3.len()); + assert_eq!(check!(rw.seek(SeekFrom::Current(0))), 9); + } + { + let mut read = check!(File::open(&filename)); + assert_eq!(check!(read.read_at(&mut buf, 0)), content.len()); + assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content)); + assert_eq!(check!(read.seek(SeekFrom::Current(0))), 0); + assert_eq!(check!(read.seek(SeekFrom::End(-5))), 9); + assert_eq!(check!(read.read_at(&mut buf, 0)), content.len()); + 
assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content)); + assert_eq!(check!(read.seek(SeekFrom::Current(0))), 9); + assert_eq!(check!(read.read(&mut buf)), write3.len()); + assert_eq!(str::from_utf8(&buf[..write3.len()]), Ok(write3)); + assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14); + assert_eq!(check!(read.read_at(&mut buf, 0)), content.len()); + assert_eq!(str::from_utf8(&buf[..content.len()]), Ok(content)); + assert_eq!(check!(read.seek(SeekFrom::Current(0))), 14); + } + check!(fs::remove_file(&filename)); + } + #[test] fn file_test_stat_is_correct_on_is_file() { let tmpdir = tmpdir(); diff --git a/src/libstd/sys/unix/fd.rs b/src/libstd/sys/unix/fd.rs index b99f4a2eacde5..1661aad68caab 100644 --- a/src/libstd/sys/unix/fd.rs +++ b/src/libstd/sys/unix/fd.rs @@ -13,7 +13,7 @@ use prelude::v1::*; use io::{self, Read}; -use libc::{self, c_int, size_t, c_void}; +use libc::{self, c_int, off_t, size_t, c_void}; use mem; use sync::atomic::{AtomicBool, Ordering}; use sys::cvt; @@ -42,7 +42,7 @@ impl FileDesc { let ret = cvt(unsafe { libc::read(self.fd, buf.as_mut_ptr() as *mut c_void, - buf.len() as size_t) + buf.len()) })?; Ok(ret as usize) } @@ -52,6 +52,16 @@ impl FileDesc { (&mut me).read_to_end(buf) } + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result { + let ret = cvt(unsafe { + libc::pread(self.fd, + buf.as_mut_ptr() as *mut c_void, + buf.len(), + offset as off_t) + })?; + Ok(ret as usize) + } + pub fn write(&self, buf: &[u8]) -> io::Result { let ret = cvt(unsafe { libc::write(self.fd, @@ -61,6 +71,16 @@ impl FileDesc { Ok(ret as usize) } + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result { + let ret = cvt(unsafe { + libc::pwrite(self.fd, + buf.as_ptr() as *const c_void, + buf.len(), + offset as off_t) + })?; + Ok(ret as usize) + } + #[cfg(not(any(target_env = "newlib", target_os = "solaris", target_os = "emscripten")))] pub fn set_cloexec(&self) -> io::Result<()> { unsafe { diff --git a/src/libstd/sys/unix/fs.rs 
b/src/libstd/sys/unix/fs.rs index 3b132744f7055..a97b2439ddb56 100644 --- a/src/libstd/sys/unix/fs.rs +++ b/src/libstd/sys/unix/fs.rs @@ -477,10 +477,18 @@ impl File { self.0.read_to_end(buf) } + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result { + self.0.read_at(buf, offset) + } + pub fn write(&self, buf: &[u8]) -> io::Result { self.0.write(buf) } + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result { + self.0.write_at(buf, offset) + } + pub fn flush(&self) -> io::Result<()> { Ok(()) } pub fn seek(&self, pos: SeekFrom) -> io::Result { diff --git a/src/libstd/sys/unix/rand.rs b/src/libstd/sys/unix/rand.rs index 25a7a3ce50dc4..e4ca8344ee287 100644 --- a/src/libstd/sys/unix/rand.rs +++ b/src/libstd/sys/unix/rand.rs @@ -10,14 +10,31 @@ pub use self::imp::OsRng; -#[cfg(all(unix, not(target_os = "ios"), not(target_os = "openbsd")))] +use mem; + +fn next_u32(mut fill_buf: &mut FnMut(&mut [u8])) -> u32 { + let mut buf: [u8; 4] = [0; 4]; + fill_buf(&mut buf); + unsafe { mem::transmute::<[u8; 4], u32>(buf) } +} + +fn next_u64(mut fill_buf: &mut FnMut(&mut [u8])) -> u64 { + let mut buf: [u8; 8] = [0; 8]; + fill_buf(&mut buf); + unsafe { mem::transmute::<[u8; 8], u64>(buf) } +} + +#[cfg(all(unix, + not(target_os = "ios"), + not(target_os = "openbsd"), + not(target_os = "freebsd")))] mod imp { use self::OsRngInner::*; + use super::{next_u32, next_u64}; use fs::File; use io; use libc; - use mem; use rand::Rng; use rand::reader::ReaderRng; use sys::os::errno; @@ -87,18 +104,6 @@ mod imp { } } - fn getrandom_next_u32() -> u32 { - let mut buf: [u8; 4] = [0; 4]; - getrandom_fill_bytes(&mut buf); - unsafe { mem::transmute::<[u8; 4], u32>(buf) } - } - - fn getrandom_next_u64() -> u64 { - let mut buf: [u8; 8] = [0; 8]; - getrandom_fill_bytes(&mut buf); - unsafe { mem::transmute::<[u8; 8], u64>(buf) } - } - #[cfg(all(target_os = "linux", any(target_arch = "x86_64", target_arch = "x86", @@ -163,13 +168,13 @@ mod imp { impl Rng for OsRng { fn next_u32(&mut 
self) -> u32 { match self.inner { - OsGetrandomRng => getrandom_next_u32(), + OsGetrandomRng => next_u32(&mut getrandom_fill_bytes), OsReaderRng(ref mut rng) => rng.next_u32(), } } fn next_u64(&mut self) -> u64 { match self.inner { - OsGetrandomRng => getrandom_next_u64(), + OsGetrandomRng => next_u64(&mut getrandom_fill_bytes), OsReaderRng(ref mut rng) => rng.next_u64(), } } @@ -184,9 +189,10 @@ mod imp { #[cfg(target_os = "openbsd")] mod imp { + use super::{next_u32, next_u64}; + use io; use libc; - use mem; use sys::os::errno; use rand::Rng; @@ -205,14 +211,10 @@ mod imp { impl Rng for OsRng { fn next_u32(&mut self) -> u32 { - let mut v = [0; 4]; - self.fill_bytes(&mut v); - unsafe { mem::transmute(v) } + next_u32(&mut |v| self.fill_bytes(v)) } fn next_u64(&mut self) -> u64 { - let mut v = [0; 8]; - self.fill_bytes(&mut v); - unsafe { mem::transmute(v) } + next_u64(&mut |v| self.fill_bytes(v)) } fn fill_bytes(&mut self, v: &mut [u8]) { // getentropy(2) permits a maximum buffer size of 256 bytes @@ -230,8 +232,9 @@ mod imp { #[cfg(target_os = "ios")] mod imp { + use super::{next_u32, next_u64}; + use io; - use mem; use ptr; use rand::Rng; use libc::{c_int, size_t}; @@ -265,14 +268,10 @@ mod imp { impl Rng for OsRng { fn next_u32(&mut self) -> u32 { - let mut v = [0; 4]; - self.fill_bytes(&mut v); - unsafe { mem::transmute(v) } + next_u32(&mut |v| self.fill_bytes(v)) } fn next_u64(&mut self) -> u64 { - let mut v = [0; 8]; - self.fill_bytes(&mut v); - unsafe { mem::transmute(v) } + next_u64(&mut |v| self.fill_bytes(v)) } fn fill_bytes(&mut self, v: &mut [u8]) { let ret = unsafe { @@ -286,3 +285,51 @@ mod imp { } } } + +#[cfg(target_os = "freebsd")] +mod imp { + use super::{next_u32, next_u64}; + + use io; + use libc; + use rand::Rng; + use ptr; + + pub struct OsRng { + // dummy field to ensure that this struct cannot be constructed outside + // of this module + _dummy: (), + } + + impl OsRng { + /// Create a new `OsRng`. 
+ pub fn new() -> io::Result { + Ok(OsRng { _dummy: () }) + } + } + + impl Rng for OsRng { + fn next_u32(&mut self) -> u32 { + next_u32(&mut |v| self.fill_bytes(v)) + } + fn next_u64(&mut self) -> u64 { + next_u64(&mut |v| self.fill_bytes(v)) + } + fn fill_bytes(&mut self, v: &mut [u8]) { + let mib = [libc::CTL_KERN, libc::KERN_ARND]; + // kern.arandom permits a maximum buffer size of 256 bytes + for s in v.chunks_mut(256) { + let mut s_len = s.len(); + let ret = unsafe { + libc::sysctl(mib.as_ptr(), mib.len() as libc::c_uint, + s.as_mut_ptr() as *mut _, &mut s_len, + ptr::null(), 0) + }; + if ret == -1 || s_len != s.len() { + panic!("kern.arandom sysctl failed! (returned {}, s.len() {}, oldlenp {})", + ret, s.len(), s_len); + } + } + } + } +} diff --git a/src/libstd/sys/windows/fs.rs b/src/libstd/sys/windows/fs.rs index 4e6cef9a28d8f..393500c84fb62 100644 --- a/src/libstd/sys/windows/fs.rs +++ b/src/libstd/sys/windows/fs.rs @@ -312,6 +312,10 @@ impl File { self.handle.read(buf) } + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result { + self.handle.read_at(buf, offset) + } + pub fn read_to_end(&self, buf: &mut Vec) -> io::Result { self.handle.read_to_end(buf) } @@ -320,6 +324,10 @@ impl File { self.handle.write(buf) } + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result { + self.handle.write_at(buf, offset) + } + pub fn flush(&self) -> io::Result<()> { Ok(()) } pub fn seek(&self, pos: SeekFrom) -> io::Result { diff --git a/src/libstd/sys/windows/handle.rs b/src/libstd/sys/windows/handle.rs index d10abae286527..98ee03fc70c6f 100644 --- a/src/libstd/sys/windows/handle.rs +++ b/src/libstd/sys/windows/handle.rs @@ -106,6 +106,19 @@ impl RawHandle { } } + pub fn read_at(&self, buf: &mut [u8], offset: u64) -> io::Result { + let mut read = 0; + let len = cmp::min(buf.len(), ::max_value() as usize) as c::DWORD; + unsafe { + let mut overlapped: c::OVERLAPPED = mem::zeroed(); + overlapped.Offset = offset as u32; + overlapped.OffsetHigh = (offset >> 
32) as u32; + cvt(c::ReadFile(self.0, buf.as_mut_ptr() as c::LPVOID, + len, &mut read, &mut overlapped))?; + } + Ok(read as usize) + } + pub unsafe fn read_overlapped(&self, buf: &mut [u8], overlapped: *mut c::OVERLAPPED) @@ -176,6 +189,19 @@ impl RawHandle { Ok(amt as usize) } + pub fn write_at(&self, buf: &[u8], offset: u64) -> io::Result { + let mut written = 0; + let len = cmp::min(buf.len(), ::max_value() as usize) as c::DWORD; + unsafe { + let mut overlapped: c::OVERLAPPED = mem::zeroed(); + overlapped.Offset = offset as u32; + overlapped.OffsetHigh = (offset >> 32) as u32; + cvt(c::WriteFile(self.0, buf.as_ptr() as c::LPVOID, + len, &mut written, &mut overlapped))?; + } + Ok(written as usize) + } + pub fn duplicate(&self, access: c::DWORD, inherit: bool, options: c::DWORD) -> io::Result { let mut ret = 0 as c::HANDLE; diff --git a/src/rust-installer b/src/rust-installer index c37d3747da75c..755bc3db4ff79 160000 --- a/src/rust-installer +++ b/src/rust-installer @@ -1 +1 @@ -Subproject commit c37d3747da75c280237dc2d6b925078e69555499 +Subproject commit 755bc3db4ff795865ea31b5b4f38ac920d8acacb diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index 0da25e7ac57b7..82fb2b0918f79 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -521,6 +521,15 @@ extern "C" LLVMRustMetadataRef LLVMRustDIBuilderCreateLexicalBlock( )); } +extern "C" LLVMRustMetadataRef LLVMRustDIBuilderCreateLexicalBlockFile( + LLVMRustDIBuilderRef Builder, + LLVMRustMetadataRef Scope, + LLVMRustMetadataRef File) { + return wrap(Builder->createLexicalBlockFile( + unwrapDI(Scope), + unwrapDI(File))); +} + extern "C" LLVMRustMetadataRef LLVMRustDIBuilderCreateStaticVariable( LLVMRustDIBuilderRef Builder, LLVMRustMetadataRef Context, diff --git a/src/test/compile-fail/E0195.rs b/src/test/compile-fail/E0195.rs index 0630dfea5e64b..06dd903b23db8 100644 --- a/src/test/compile-fail/E0195.rs +++ b/src/test/compile-fail/E0195.rs @@ -16,6 +16,7 @@ struct 
Foo; impl Trait for Foo { fn bar<'a,'b>(x: &'a str, y: &'b str) { //~ ERROR E0195 + //~^ lifetimes do not match trait } } diff --git a/src/test/compile-fail/E0478.rs b/src/test/compile-fail/E0478.rs new file mode 100644 index 0000000000000..8eb4003fc9734 --- /dev/null +++ b/src/test/compile-fail/E0478.rs @@ -0,0 +1,18 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +trait Wedding<'t>: 't { } + +struct Prince<'kiss, 'SnowWhite> { + child: Box + 'SnowWhite>, //~ ERROR E0478 +} + +fn main() { +} diff --git a/src/test/compile-fail/E0492.rs b/src/test/compile-fail/E0492.rs new file mode 100644 index 0000000000000..8e4964c97c593 --- /dev/null +++ b/src/test/compile-fail/E0492.rs @@ -0,0 +1,17 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; + +const A: AtomicUsize = ATOMIC_USIZE_INIT; +static B: &'static AtomicUsize = &A; //~ ERROR E0492 + +fn main() { +} diff --git a/src/test/compile-fail/E0493.rs b/src/test/compile-fail/E0493.rs new file mode 100644 index 0000000000000..689f469533d96 --- /dev/null +++ b/src/test/compile-fail/E0493.rs @@ -0,0 +1,22 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Foo { + a: u32 +} + +impl Drop for Foo { + fn drop(&mut self) {} +} + +const F : Foo = Foo { a : 0 }; //~ ERROR E0493 + +fn main() { +} diff --git a/src/test/compile-fail/E0494.rs b/src/test/compile-fail/E0494.rs new file mode 100644 index 0000000000000..5f8632ac1c23d --- /dev/null +++ b/src/test/compile-fail/E0494.rs @@ -0,0 +1,19 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Foo { + a: u32 +} + +static S : Foo = Foo { a : 0 }; +static A : &'static u32 = &S.a; //~ ERROR E0494 + +fn main() { +} diff --git a/src/test/compile-fail/E0496.rs b/src/test/compile-fail/E0496.rs new file mode 100644 index 0000000000000..4ca3cd9c13da6 --- /dev/null +++ b/src/test/compile-fail/E0496.rs @@ -0,0 +1,21 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +struct Foo<'a> { + a: &'a i32, +} + +impl<'a> Foo<'a> { + fn f<'a>(x: &'a i32) { //~ ERROR E0496 + } +} + +fn main() { +} diff --git a/src/test/compile-fail/E0499.rs b/src/test/compile-fail/E0499.rs new file mode 100644 index 0000000000000..9a64bfe2ea9e7 --- /dev/null +++ b/src/test/compile-fail/E0499.rs @@ -0,0 +1,15 @@ +// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn main() { + let mut i = 0; + let mut x = &mut i; + let mut a = &mut i; //~ ERROR E0499 +} diff --git a/src/test/compile-fail/E0501.rs b/src/test/compile-fail/E0501.rs new file mode 100644 index 0000000000000..04678b96c8d08 --- /dev/null +++ b/src/test/compile-fail/E0501.rs @@ -0,0 +1,25 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn inside_closure(x: &mut i32) { +} + +fn outside_closure(x: &mut i32) { +} + +fn foo(a: &mut i32) { + let bar = || { + inside_closure(a) + }; + outside_closure(a); //~ ERROR E0501 +} + +fn main() { +} diff --git a/src/test/compile-fail/diverging-fn-tail-35849.rs b/src/test/compile-fail/diverging-fn-tail-35849.rs new file mode 100644 index 0000000000000..6dc447b4dc887 --- /dev/null +++ b/src/test/compile-fail/diverging-fn-tail-35849.rs @@ -0,0 +1,16 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn _converge() -> ! 
{ //~ ERROR computation may converge + 42 +} + +fn main() { } + diff --git a/src/test/compile-fail/issue-16048.rs b/src/test/compile-fail/issue-16048.rs index ceac7e968f65c..5012556dedddc 100644 --- a/src/test/compile-fail/issue-16048.rs +++ b/src/test/compile-fail/issue-16048.rs @@ -29,6 +29,7 @@ impl<'a> Test<'a> for Foo<'a> { impl<'a> NoLifetime for Foo<'a> { fn get<'p, T : Test<'a>>(&self) -> T { //~^ ERROR E0195 +//~| lifetimes do not match trait return *self as T; } } diff --git a/src/test/debuginfo/auxiliary/macro-stepping.rs b/src/test/debuginfo/auxiliary/macro-stepping.rs new file mode 100644 index 0000000000000..1006b684a8c22 --- /dev/null +++ b/src/test/debuginfo/auxiliary/macro-stepping.rs @@ -0,0 +1,20 @@ +// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// compile-flags:-g + +#![crate_type = "rlib"] + +#[macro_export] +macro_rules! new_scope { + () => { + let x = 1; + } +} diff --git a/src/test/debuginfo/lexical-scope-with-macro.rs b/src/test/debuginfo/lexical-scope-with-macro.rs index a00d0f74f1e4e..eb5798dc7cc48 100644 --- a/src/test/debuginfo/lexical-scope-with-macro.rs +++ b/src/test/debuginfo/lexical-scope-with-macro.rs @@ -10,7 +10,7 @@ // min-lldb-version: 310 -// compile-flags:-g +// compile-flags:-g -Zdebug-macros // === GDB TESTS =================================================================================== diff --git a/src/test/debuginfo/macro-stepping.rs b/src/test/debuginfo/macro-stepping.rs new file mode 100644 index 0000000000000..612d2821e11c3 --- /dev/null +++ b/src/test/debuginfo/macro-stepping.rs @@ -0,0 +1,102 @@ +// Copyright 2013-2016 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-windows +// min-lldb-version: 310 + +// aux-build:macro-stepping.rs + +#![allow(unused)] + +#[macro_use] +extern crate macro_stepping; // exports new_scope!() + +// compile-flags:-g + +// === GDB TESTS =================================================================================== + +// gdb-command:run +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc1[...] +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc2[...] +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc3[...] +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc4[...] +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc5[...] +// gdb-command:next +// gdb-command:frame +// gdb-check:[...]#loc6[...] + +// === LLDB TESTS ================================================================================== + +// lldb-command:set set stop-line-count-before 0 +// lldb-command:set set stop-line-count-after 1 +// Can't set both to zero or lldb will stop printing source at all. So it will output the current +// line and the next. We deal with this by having at least 2 lines between the #loc's + +// lldb-command:run +// lldb-command:next +// lldb-command:frame select +// lldb-check:[...]#loc1[...] +// lldb-command:next +// lldb-command:frame select +// lldb-check:[...]#loc2[...] +// lldb-command:next +// lldb-command:frame select +// lldb-check:[...]#loc3[...] +// lldb-command:next +// lldb-command:frame select +// lldb-check:[...]#loc4[...] +// lldb-command:next +// lldb-command:frame select +// lldb-check:[...]#loc5[...] + +macro_rules! foo { + () => { + let a = 1; + let b = 2; + let c = 3; + } +} + +macro_rules! 
foo2 { + () => { + foo!(); + let x = 1; + foo!(); + } +} + +fn main() { + zzz(); // #break + + foo!(); // #loc1 + + foo2!(); // #loc2 + + let x = vec![42]; // #loc3 + + new_scope!(); // #loc4 + + println!("Hello {}", // #loc5 + "world"); + + zzz(); // #loc6 +} + +fn zzz() {()} diff --git a/src/test/run-fail/call-fn-never-arg.rs b/src/test/run-fail/call-fn-never-arg.rs index 95101e70db951..b1aa76cd9bfe5 100644 --- a/src/test/run-fail/call-fn-never-arg.rs +++ b/src/test/run-fail/call-fn-never-arg.rs @@ -10,6 +10,7 @@ // Test that we can use a ! for an argument of type ! +// ignore-test FIXME(durka) can't be done with the current liveness code // error-pattern:wowzers! #![feature(never_type)] diff --git a/src/test/run-pass/diverging-fn-tail-35849.rs b/src/test/run-pass/diverging-fn-tail-35849.rs new file mode 100644 index 0000000000000..6c05a02e7183c --- /dev/null +++ b/src/test/run-pass/diverging-fn-tail-35849.rs @@ -0,0 +1,18 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn assert_sizeof() -> ! 
{ + unsafe { + ::std::mem::transmute::(panic!()) + } +} + +fn main() { } + diff --git a/src/test/run-pass/fds-are-cloexec.rs b/src/test/run-pass/fds-are-cloexec.rs index c2916ccd75b8e..b7ce622bf563e 100644 --- a/src/test/run-pass/fds-are-cloexec.rs +++ b/src/test/run-pass/fds-are-cloexec.rs @@ -34,7 +34,7 @@ fn main() { } fn parent() { - let file = File::open("Makefile").unwrap(); + let file = File::open("src/test/run-pass/fds-are-cloexec.rs").unwrap(); let tcp1 = TcpListener::bind("127.0.0.1:0").unwrap(); let tcp2 = tcp1.try_clone().unwrap(); let addr = tcp1.local_addr().unwrap(); diff --git a/src/test/run-pass/mir_heavy_promoted.rs b/src/test/run-pass/mir_heavy_promoted.rs new file mode 100644 index 0000000000000..9e033421574b9 --- /dev/null +++ b/src/test/run-pass/mir_heavy_promoted.rs @@ -0,0 +1,18 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +const TEST_DATA: [u8; 32 * 1024 * 1024] = [42; 32 * 1024 * 1024]; + +// Check that the promoted copy of TEST_DATA doesn't +// leave an alloca from an unused temp behind, which, +// without optimizations, can still blow the stack. +fn main() { + println!("{}", TEST_DATA.len()); +}