diff --git a/Cargo.lock b/Cargo.lock index fbac2c7879d12..1f78c5f3f7569 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1966,9 +1966,9 @@ dependencies = [ [[package]] name = "measureme" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d09de7dafa3aa334bc806447c7e4de69419723312f4b88b80b561dea66601ce8" +checksum = "cd21b0e6e1af976b269ce062038fe5e1b9ca2f817ab7a3af09ec4210aebf0d30" dependencies = [ "byteorder", "memmap", diff --git a/src/libcore/array/iter.rs b/src/libcore/array/iter.rs index 850a599c6599f..11803238407c8 100644 --- a/src/libcore/array/iter.rs +++ b/src/libcore/array/iter.rs @@ -13,7 +13,7 @@ use super::LengthAtMost32; /// A by-value [array] iterator. /// /// [array]: ../../std/primitive.array.html -#[unstable(feature = "array_value_iter", issue = "0")] +#[unstable(feature = "array_value_iter", issue = "65798")] pub struct IntoIter where [T; N]: LengthAtMost32, @@ -49,7 +49,7 @@ where /// *Note*: this method might never get stabilized and/or removed in the /// future as there will likely be another, preferred way of obtaining this /// iterator (either via `IntoIterator` for arrays or via another way). - #[unstable(feature = "array_value_iter", issue = "0")] + #[unstable(feature = "array_value_iter", issue = "65798")] pub fn new(array: [T; N]) -> Self { // The transmute here is actually safe. 
The docs of `MaybeUninit` // promise: @@ -95,7 +95,7 @@ where } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl Iterator for IntoIter where [T; N]: LengthAtMost32, @@ -141,7 +141,7 @@ where } } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl DoubleEndedIterator for IntoIter where [T; N]: LengthAtMost32, @@ -176,7 +176,7 @@ where } } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl Drop for IntoIter where [T; N]: LengthAtMost32, @@ -189,7 +189,7 @@ where } } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl ExactSizeIterator for IntoIter where [T; N]: LengthAtMost32, @@ -204,7 +204,7 @@ where } } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl FusedIterator for IntoIter where [T; N]: LengthAtMost32, @@ -214,13 +214,13 @@ where // elements (that will still be yielded) is the length of the range `alive`. // This range is decremented in length in either `next` or `next_back`. It is // always decremented by 1 in those methods, but only if `Some(_)` is returned. 
-#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] unsafe impl TrustedLen for IntoIter where [T; N]: LengthAtMost32, {} -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl Clone for IntoIter where [T; N]: LengthAtMost32, @@ -251,7 +251,7 @@ where } } -#[stable(feature = "array_value_iter_impls", since = "1.38.0")] +#[stable(feature = "array_value_iter_impls", since = "1.40.0")] impl fmt::Debug for IntoIter where [T; N]: LengthAtMost32, diff --git a/src/libcore/array/mod.rs b/src/libcore/array/mod.rs index 120658e9a4343..e1ec8b795d04c 100644 --- a/src/libcore/array/mod.rs +++ b/src/libcore/array/mod.rs @@ -18,7 +18,7 @@ use crate::slice::{Iter, IterMut}; mod iter; #[cfg(not(bootstrap))] -#[unstable(feature = "array_value_iter", issue = "0")] +#[unstable(feature = "array_value_iter", issue = "65798")] pub use iter::IntoIter; /// Utility trait implemented only on arrays of fixed size diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index b240d059114eb..bae7ae2884f90 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -1339,6 +1339,10 @@ extern "rust-intrinsic" { /// Emits a `!nontemporal` store according to LLVM (see their docs). /// Probably will never become stable. pub fn nontemporal_store(ptr: *mut T, val: T); + + /// See documentation of `<*const T>::offset_from` for details. 
+ #[cfg(not(bootstrap))] + pub fn ptr_offset_from(ptr: *const T, base: *const T) -> isize; } // Some functions are defined here because they accidentally got made diff --git a/src/libcore/ptr/mod.rs b/src/libcore/ptr/mod.rs index 3cc0a1cd75e88..1355ce1aa43b7 100644 --- a/src/libcore/ptr/mod.rs +++ b/src/libcore/ptr/mod.rs @@ -1286,7 +1286,22 @@ impl *const T { /// } /// ``` #[unstable(feature = "ptr_offset_from", issue = "41079")] + #[cfg(not(bootstrap))] + #[rustc_const_unstable(feature = "const_ptr_offset_from")] #[inline] + pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { + let pointee_size = mem::size_of::(); + let ok = 0 < pointee_size && pointee_size <= isize::max_value() as usize; + // assert that the pointee size is valid in a const eval compatible way + // FIXME: do this with a real assert at some point + [()][(!ok) as usize]; + intrinsics::ptr_offset_from(self, origin) + } + + #[unstable(feature = "ptr_offset_from", issue = "41079")] + #[inline] + #[cfg(bootstrap)] + /// bootstrap pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { let pointee_size = mem::size_of::(); assert!(0 < pointee_size && pointee_size <= isize::max_value() as usize); @@ -2013,8 +2028,9 @@ impl *mut T { /// } /// ``` #[unstable(feature = "ptr_offset_from", issue = "41079")] + #[rustc_const_unstable(feature = "const_ptr_offset_from")] #[inline] - pub unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { + pub const unsafe fn offset_from(self, origin: *const T) -> isize where T: Sized { (self as *const T).offset_from(origin) } diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index 4e79ea812044b..185913a47f1a2 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -28,7 +28,7 @@ use crate::fmt; use crate::intrinsics::{assume, exact_div, unchecked_sub, is_aligned_and_not_null}; use crate::isize; use crate::iter::*; -use crate::ops::{FnMut, self}; +use crate::ops::{FnMut, Range, 
self}; use crate::option::Option; use crate::option::Option::{None, Some}; use crate::result::Result; @@ -407,6 +407,83 @@ impl [T] { self as *mut [T] as *mut T } + /// Returns the two raw pointers spanning the slice. + /// + /// The returned range is half-open, which means that the end pointer + /// points *one past* the last element of the slice. This way, an empty + /// slice is represented by two equal pointers, and the difference between + /// the two pointers represents the size of the slice. + /// + /// See [`as_ptr`] for warnings on using these pointers. The end pointer + /// requires extra caution, as it does not point to a valid element in the + /// slice. + /// + /// This function is useful for interacting with foreign interfaces which + /// use two pointers to refer to a range of elements in memory, as is + /// common in C++. + /// + /// It can also be useful to check if a reference or pointer to an element + /// refers to an element of this slice: + /// + /// ``` + /// let a = [1,2,3]; + /// let x = &a[1]; + /// let y = &5; + /// assert!(a.as_ptr_range().contains(x)); + /// assert!(!a.as_ptr_range().contains(y)); + /// ``` + /// + /// [`as_ptr`]: #method.as_ptr + #[unstable(feature = "slice_ptr_range", issue = "65807")] + #[inline] + pub fn as_ptr_range(&self) -> Range<*const T> { + // The `add` here is safe, because: + // + // - Both pointers are part of the same object, as pointing directly + // past the object also counts. + // + // - The size of the slice is never larger than isize::MAX bytes, as + // noted here: + // - https://github.com/rust-lang/unsafe-code-guidelines/issues/102#issuecomment-473340447 + // - https://doc.rust-lang.org/reference/behavior-considered-undefined.html + // - https://doc.rust-lang.org/core/slice/fn.from_raw_parts.html#safety + // (This doesn't seem normative yet, but the very same assumption is + // made in many places, including the Index implementation of slices.) 
+ // + // - There is no wrapping around involved, as slices do not wrap past + // the end of the address space. + // + // See the documentation of pointer::add. + let start = self.as_ptr(); + let end = unsafe { start.add(self.len()) }; + start..end + } + + /// Returns the two unsafe mutable pointers spanning the slice. + /// + /// The returned range is half-open, which means that the end pointer + /// points *one past* the last element of the slice. This way, an empty + /// slice is represented by two equal pointers, and the difference between + /// the two pointers represents the size of the slice. + /// + /// See [`as_mut_ptr`] for warnings on using these pointers. The end + /// pointer requires extra caution, as it does not point to a valid element + /// in the slice. + /// + /// This function is useful for interacting with foreign interfaces which + /// use two pointers to refer to a range of elements in memory, as is + /// common in C++. + /// + /// [`as_mut_ptr`]: #method.as_mut_ptr + #[unstable(feature = "slice_ptr_range", issue = "65807")] + #[inline] + pub fn as_mut_ptr_range(&mut self) -> Range<*mut T> { + // See as_ptr_range() above for why `add` here is safe. + let start = self.as_mut_ptr(); + let end = unsafe { start.add(self.len()) }; + start..end + } + /// Swaps two elements in the slice. 
/// /// # Arguments diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index 93274ef0c927c..38631224fd359 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -37,4 +37,4 @@ byteorder = { version = "1.3" } chalk-engine = { version = "0.9.0", default-features=false } rustc_fs_util = { path = "../librustc_fs_util" } smallvec = { version = "0.6.8", features = ["union", "may_dangle"] } -measureme = "0.3" +measureme = "0.4" diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 41b4883793b54..538154b035ac6 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -90,6 +90,10 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { } return TryGetJob::JobCompleted(result); } + + #[cfg(parallel_compiler)] + let query_blocked_prof_timer; + let job = match lock.active.entry((*key).clone()) { Entry::Occupied(entry) => { match *entry.get() { @@ -98,7 +102,9 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { // in another thread has completed. Record how long we wait in the // self-profiler. #[cfg(parallel_compiler)] - tcx.prof.query_blocked_start(Q::NAME); + { + query_blocked_prof_timer = tcx.prof.query_blocked(Q::NAME); + } job.clone() }, @@ -140,7 +146,11 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { #[cfg(parallel_compiler)] { let result = job.r#await(tcx, span); - tcx.prof.query_blocked_end(Q::NAME); + + // This `drop()` is not strictly necessary as the binding + // would go out of scope anyway. But it's good to have an + // explicit marker of how far the measurement goes. 
+ drop(query_blocked_prof_timer); if let Err(cycle) = result { return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle)); diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs index bd02e7f5a14a1..5a1b7f3aa4cb8 100644 --- a/src/librustc/util/profiling.rs +++ b/src/librustc/util/profiling.rs @@ -14,9 +14,12 @@ use measureme::{StringId, TimestampKind}; /// MmapSerializatioSink is faster on macOS and Linux /// but FileSerializationSink is faster on Windows #[cfg(not(windows))] -type Profiler = measureme::Profiler; +type SerializationSink = measureme::MmapSerializationSink; #[cfg(windows)] -type Profiler = measureme::Profiler; +type SerializationSink = measureme::FileSerializationSink; + +type Profiler = measureme::Profiler; + #[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)] pub enum ProfileCategory { @@ -131,32 +134,6 @@ impl SelfProfilerRef { }) } - /// Start profiling a generic activity. Profiling continues until - /// `generic_activity_end` is called. The RAII-based `generic_activity` - /// usually is the better alternative. - #[inline(always)] - pub fn generic_activity_start(&self, event_id: &str) { - self.non_guard_generic_event( - |profiler| profiler.generic_activity_event_kind, - |profiler| profiler.profiler.alloc_string(event_id), - EventFilter::GENERIC_ACTIVITIES, - TimestampKind::Start, - ); - } - - /// End profiling a generic activity that was started with - /// `generic_activity_start`. The RAII-based `generic_activity` usually is - /// the better alternative. - #[inline(always)] - pub fn generic_activity_end(&self, event_id: &str) { - self.non_guard_generic_event( - |profiler| profiler.generic_activity_event_kind, - |profiler| profiler.profiler.alloc_string(event_id), - EventFilter::GENERIC_ACTIVITIES, - TimestampKind::End, - ); - } - /// Start profiling a query provider. Profiling continues until the /// TimingGuard returned from this call is dropped. 
#[inline(always)] @@ -179,26 +156,14 @@ impl SelfProfilerRef { } /// Start profiling a query being blocked on a concurrent execution. - /// Profiling continues until `query_blocked_end` is called. - #[inline(always)] - pub fn query_blocked_start(&self, query_name: QueryName) { - self.non_guard_query_event( - |profiler| profiler.query_blocked_event_kind, - query_name, - EventFilter::QUERY_BLOCKED, - TimestampKind::Start, - ); - } - - /// End profiling a query being blocked on a concurrent execution. + /// Profiling continues until the TimingGuard returned from this call is + /// dropped. #[inline(always)] - pub fn query_blocked_end(&self, query_name: QueryName) { - self.non_guard_query_event( - |profiler| profiler.query_blocked_event_kind, - query_name, - EventFilter::QUERY_BLOCKED, - TimestampKind::End, - ); + pub fn query_blocked(&self, query_name: QueryName) -> TimingGuard<'_> { + self.exec(EventFilter::QUERY_BLOCKED, |profiler| { + let event_id = SelfProfiler::get_query_name_string_id(query_name); + TimingGuard::start(profiler, profiler.query_blocked_event_kind, event_id) + }) } /// Start profiling how long it takes to load a query result from the @@ -238,28 +203,6 @@ impl SelfProfilerRef { TimingGuard::none() })); } - - #[inline(always)] - fn non_guard_generic_event StringId>( - &self, - event_kind: fn(&SelfProfiler) -> StringId, - event_id: F, - event_filter: EventFilter, - timestamp_kind: TimestampKind - ) { - drop(self.exec(event_filter, |profiler| { - let thread_id = thread_id_to_u64(std::thread::current().id()); - - profiler.profiler.record_event( - event_kind(profiler), - event_id(profiler), - thread_id, - timestamp_kind, - ); - - TimingGuard::none() - })); - } } pub struct SelfProfiler { @@ -346,14 +289,7 @@ impl SelfProfiler { } #[must_use] -pub struct TimingGuard<'a>(Option>); - -struct TimingGuardInternal<'a> { - raw_profiler: &'a Profiler, - event_id: StringId, - event_kind: StringId, - thread_id: u64, -} +pub struct TimingGuard<'a>(Option>); 
impl<'a> TimingGuard<'a> { #[inline] @@ -364,14 +300,10 @@ impl<'a> TimingGuard<'a> { ) -> TimingGuard<'a> { let thread_id = thread_id_to_u64(std::thread::current().id()); let raw_profiler = &profiler.profiler; - raw_profiler.record_event(event_kind, event_id, thread_id, TimestampKind::Start); - - TimingGuard(Some(TimingGuardInternal { - raw_profiler, - event_kind, - event_id, - thread_id, - })) + let timing_guard = raw_profiler.start_recording_interval_event(event_kind, + event_id, + thread_id); + TimingGuard(Some(timing_guard)) } #[inline] @@ -379,15 +311,3 @@ impl<'a> TimingGuard<'a> { TimingGuard(None) } } - -impl<'a> Drop for TimingGuardInternal<'a> { - #[inline] - fn drop(&mut self) { - self.raw_profiler.record_event( - self.event_kind, - self.event_id, - self.thread_id, - TimestampKind::End - ); - } -} diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 3df8d4c26903b..97bd57a7ded06 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -19,6 +19,7 @@ use rustc::mir::interpret::GlobalId; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc::hir; use syntax::ast::{self, FloatTy}; +use rustc_target::abi::HasDataLayout; use rustc_codegen_ssa::common::span_invalid_monomorphization_error; use rustc_codegen_ssa::traits::*; @@ -694,6 +695,23 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { return; } + "ptr_offset_from" => { + let ty = substs.type_at(0); + let pointee_size = self.size_of(ty); + + // This is the same sequence that Clang emits for pointer subtraction. + // It can be neither `nsw` nor `nuw` because the input is treated as + // unsigned but then the output is treated as signed, so neither works. 
+ let a = args[0].immediate(); + let b = args[1].immediate(); + let a = self.ptrtoint(a, self.type_isize()); + let b = self.ptrtoint(b, self.type_isize()); + let d = self.sub(a, b); + let pointee_size = self.const_usize(pointee_size.bytes()); + // this is where the signed magic happens (notice the `s` in `exactsdiv`) + self.exactsdiv(d, pointee_size) + } + _ => bug!("unknown intrinsic '{}'", name), }; @@ -1224,7 +1242,6 @@ fn generic_simd_intrinsic( // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a // vector mask and returns an unsigned integer containing the most // significant bit (MSB) of each lane. - use rustc_target::abi::HasDataLayout; // If the vector has less than 8 lanes, an u8 is returned with zeroed // trailing bits. diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 5fc23b4a69ec5..e3d3ff70c4e8a 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -12,7 +12,7 @@ use rustc::mir::BinOp; use rustc::mir::interpret::{InterpResult, Scalar, GlobalId, ConstValue}; use super::{ - Machine, PlaceTy, OpTy, InterpCx, + Machine, PlaceTy, OpTy, InterpCx, ImmTy, }; mod type_name; @@ -236,6 +236,29 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let result = Scalar::from_uint(truncated_bits, layout.size); self.write_scalar(result, dest)?; } + + "ptr_offset_from" => { + let a = self.read_immediate(args[0])?.to_scalar()?.to_ptr()?; + let b = self.read_immediate(args[1])?.to_scalar()?.to_ptr()?; + if a.alloc_id != b.alloc_id { + throw_ub_format!( + "ptr_offset_from cannot compute offset of pointers into different \ + allocations.", + ); + } + let usize_layout = self.layout_of(self.tcx.types.usize)?; + let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout); + let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout); + let (val, _overflowed, _ty) = self.overflowing_binary_op( + BinOp::Sub, a_offset, b_offset, + )?; + 
let pointee_layout = self.layout_of(substs.type_at(0))?; + let isize_layout = self.layout_of(self.tcx.types.isize)?; + let val = ImmTy::from_scalar(val, isize_layout); + let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout); + self.exact_div(val, size, dest)?; + } + "transmute" => { self.copy_op_transmute(args[0], dest)?; } @@ -340,4 +363,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { return Ok(false); } } + + pub fn exact_div( + &mut self, + a: ImmTy<'tcx, M::PointerTag>, + b: ImmTy<'tcx, M::PointerTag>, + dest: PlaceTy<'tcx, M::PointerTag>, + ) -> InterpResult<'tcx> { + // Performs an exact division, resulting in undefined behavior where + // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`. + // First, check x % y != 0. + if self.binary_op(BinOp::Rem, a, b)?.to_bits()? != 0 { + // Then, check if `b` is -1, which is the "min_value / -1" case. + let minus1 = Scalar::from_int(-1, dest.layout.size); + let b = b.to_scalar().unwrap(); + if b == minus1 { + throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented") + } else { + throw_ub_format!( + "exact_div: {} cannot be divided by {} without remainder", + a.to_scalar().unwrap(), + b, + ) + } + } + self.binop_ignore_overflow(BinOp::Div, a, b, dest) + } } diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index da1abb9747c1a..713a5b4ced0e2 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -560,6 +560,7 @@ impl Qualif for IsNotPromotable { | "transmute" | "simd_insert" | "simd_extract" + | "ptr_offset_from" => return true, _ => {} diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index 72a0fe887b964..3378cab95debc 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -307,6 +307,8 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: 
&hir::ForeignItem) { (1, vec![param(0), param(0)], tcx.intern_tup(&[param(0), tcx.types.bool])), + "ptr_offset_from" => + (1, vec![ tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0)) ], tcx.types.isize), "unchecked_div" | "unchecked_rem" | "exact_div" => (1, vec![param(0), param(0)], param(0)), "unchecked_shl" | "unchecked_shr" | diff --git a/src/libstd/sys/sgx/abi/entry.S b/src/libstd/sys/sgx/abi/entry.S index c35e49b1dc6ea..cd26c7ca200b0 100644 --- a/src/libstd/sys/sgx/abi/entry.S +++ b/src/libstd/sys/sgx/abi/entry.S @@ -119,8 +119,14 @@ sgx_entry: mov %rbx,%gs:tcsls_tcs_addr stmxcsr %gs:tcsls_user_mxcsr fnstcw %gs:tcsls_user_fcw + /* reset user state */ - cld /* x86-64 ABI requires DF to be unset at function entry/exit */ +/* - DF flag: x86-64 ABI requires DF to be unset at function entry/exit */ +/* - AC flag: AEX on misaligned memory accesses leaks side channel info */ + pushfq + andq $~0x40400, (%rsp) + popfq + /* check for debug buffer pointer */ testb $0xff,DEBUG(%rip) jz .Lskip_debug_init diff --git a/src/test/ui/consts/offset_from.rs b/src/test/ui/consts/offset_from.rs new file mode 100644 index 0000000000000..4d6800681889c --- /dev/null +++ b/src/test/ui/consts/offset_from.rs @@ -0,0 +1,47 @@ +// run-pass + +#![feature(const_raw_ptr_deref)] +#![feature(const_ptr_offset_from)] +#![feature(ptr_offset_from)] + +struct Struct { + field: (), +} + +#[repr(C)] +struct Struct2 { + data: u8, + field: u8, +} + +pub const OFFSET: usize = { + let uninit = std::mem::MaybeUninit::::uninit(); + let base_ptr: *const Struct = &uninit as *const _ as *const Struct; + // The following statement is UB (taking the address of an uninitialized field). + // Const eval doesn't detect this right now, but it may stop compiling at some point + // in the future. 
+ let field_ptr = unsafe { &(*base_ptr).field as *const () as *const u8 }; + let offset = unsafe { field_ptr.offset_from(base_ptr as *const u8) }; + offset as usize +}; + +pub const OFFSET_2: usize = { + let uninit = std::mem::MaybeUninit::::uninit(); + let base_ptr: *const Struct2 = &uninit as *const _ as *const Struct2; + let field_ptr = unsafe { &(*base_ptr).field as *const u8 }; + let offset = unsafe { field_ptr.offset_from(base_ptr as *const u8) }; + offset as usize +}; + +pub const OVERFLOW: isize = { + let uninit = std::mem::MaybeUninit::::uninit(); + let base_ptr: *const Struct2 = &uninit as *const _ as *const Struct2; + let field_ptr = unsafe { &(*base_ptr).field as *const u8 }; + unsafe { (base_ptr as *const u8).offset_from(field_ptr) } +}; + +fn main() { + assert_eq!(OFFSET, 0); + assert_eq!(OFFSET_2, 1); + assert_eq!(OVERFLOW, -1); +} diff --git a/src/test/ui/consts/offset_from_ub.rs b/src/test/ui/consts/offset_from_ub.rs new file mode 100644 index 0000000000000..a233c24a7c52c --- /dev/null +++ b/src/test/ui/consts/offset_from_ub.rs @@ -0,0 +1,38 @@ +// ignoring on musl because its ui output hides libcore backtraces + +// ignore-musl + +#![feature(const_raw_ptr_deref)] +#![feature(const_ptr_offset_from)] +#![feature(ptr_offset_from)] + +#[repr(C)] +struct Struct { + data: u8, + field: u8, +} + +pub const DIFFERENT_ALLOC: usize = { + //~^ NOTE + let uninit = std::mem::MaybeUninit::::uninit(); + let base_ptr: *const Struct = &uninit as *const _ as *const Struct; + let uninit2 = std::mem::MaybeUninit::::uninit(); + let field_ptr: *const Struct = &uninit2 as *const _ as *const Struct; + let offset = unsafe { field_ptr.offset_from(base_ptr) }; + offset as usize +}; + +pub const NOT_PTR: usize = { + //~^ NOTE + unsafe { (42 as *const u8).offset_from(&5u8) as usize } +}; + +pub const NOT_MULTIPLE_OF_SIZE: usize = { + //~^ NOTE + let data = [5u8, 6, 7]; + let base_ptr = data.as_ptr(); + let field_ptr = &data[1] as *const u8 as *const u16; + let offset = unsafe { 
field_ptr.offset_from(base_ptr as *const u16) }; + offset as usize +}; + +fn main() {} diff --git a/src/test/ui/consts/offset_from_ub.stderr b/src/test/ui/consts/offset_from_ub.stderr new file mode 100644 index 0000000000000..1460170a108cb --- /dev/null +++ b/src/test/ui/consts/offset_from_ub.stderr @@ -0,0 +1,61 @@ +error: any use of this value will cause an error + --> $SRC_DIR/libcore/ptr/mod.rs:LL:COL + | +LL | intrinsics::ptr_offset_from(self, origin) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | ptr_offset_from cannot compute offset of pointers into different allocations. + | inside call to `std::ptr::::offset_from` at $DIR/offset_from_ub.rs:20:27 + | + ::: $DIR/offset_from_ub.rs:14:1 + | +LL | / pub const DIFFERENT_ALLOC: usize = { +LL | | +LL | | let uninit = std::mem::MaybeUninit::::uninit(); +LL | | let base_ptr: *const Struct = &uninit as *const _ as *const Struct; +... | +LL | | offset as usize +LL | | }; + | |__- + | + = note: `#[deny(const_err)]` on by default + +error: any use of this value will cause an error + --> $SRC_DIR/libcore/ptr/mod.rs:LL:COL + | +LL | intrinsics::ptr_offset_from(self, origin) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | a memory access tried to interpret some bytes as a pointer + | inside call to `std::ptr::::offset_from` at $DIR/offset_from_ub.rs:26:14 + | + ::: $DIR/offset_from_ub.rs:24:1 + | +LL | / pub const NOT_PTR: usize = { +LL | | +LL | | unsafe { (42 as *const u8).offset_from(&5u8) as usize } +LL | | }; + | |__- + +error: any use of this value will cause an error + --> $SRC_DIR/libcore/ptr/mod.rs:LL:COL + | +LL | intrinsics::ptr_offset_from(self, origin) + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | exact_div: 1 cannot be divided by 2 without remainder + | inside call to `std::ptr::::offset_from` at $DIR/offset_from_ub.rs:34:27 + | + ::: $DIR/offset_from_ub.rs:29:1 + | +LL | / pub const NOT_MULTIPLE_OF_SIZE: usize = { +LL | | +LL | | let data = [5u8, 6, 7]; +LL | | let base_ptr = 
data.as_ptr(); +... | +LL | | offset as usize +LL | | }; + | |__- + +error: aborting due to 3 previous errors + diff --git a/src/test/ui/offset_from.rs b/src/test/ui/offset_from.rs new file mode 100644 index 0000000000000..cbbb2adf15f91 --- /dev/null +++ b/src/test/ui/offset_from.rs @@ -0,0 +1,15 @@ +// run-pass + +#![feature(ptr_offset_from)] + +fn main() { + let mut a = [0; 5]; + let ptr1: *mut i32 = &mut a[1]; + let ptr2: *mut i32 = &mut a[3]; + unsafe { + assert_eq!(ptr2.offset_from(ptr1), 2); + assert_eq!(ptr1.offset_from(ptr2), -2); + assert_eq!(ptr1.offset(2), ptr2); + assert_eq!(ptr2.offset(-2), ptr1); + } +}