From 8330377a2b22323e6611c58551a026f54144a7b0 Mon Sep 17 00:00:00 2001
From: Peter Jaszkowiak
Date: Wed, 8 Nov 2023 20:58:28 -0700
Subject: [PATCH 1/2] fallible allocation for Vec via 3rd generic param
---
library/alloc/src/alloc.rs | 3 +
library/alloc/src/alloc/failure_handling.rs | 39 ++
library/alloc/src/boxed.rs | 6 +-
.../alloc/src/collections/vec_deque/mod.rs | 3 +-
library/alloc/src/raw_vec.rs | 245 ++++++---
library/alloc/src/rc.rs | 3 +-
library/alloc/src/slice.rs | 13 +-
library/alloc/src/sync.rs | 3 +-
library/alloc/src/vec/drain.rs | 30 +-
library/alloc/src/vec/extract_if.rs | 10 +-
library/alloc/src/vec/into_iter.rs | 5 +-
library/alloc/src/vec/mod.rs | 477 ++++++++++++++++--
library/alloc/src/vec/partial_eq.rs | 22 +-
library/alloc/src/vec/spec_extend.rs | 11 +-
library/alloc/src/vec/spec_from_elem.rs | 9 +-
.../alloc/src/vec/spec_from_iter_nested.rs | 4 +-
library/alloc/src/vec/splice.rs | 3 +-
library/alloc/src/vec/try_spec_extend.rs | 60 +++
18 files changed, 776 insertions(+), 170 deletions(-)
create mode 100644 library/alloc/src/alloc/failure_handling.rs
create mode 100644 library/alloc/src/vec/try_spec_extend.rs
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 2499f1053d860..517132fc90678 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -12,6 +12,9 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub mod failure_handling;
+
#[cfg(test)]
mod tests;
diff --git a/library/alloc/src/alloc/failure_handling.rs b/library/alloc/src/alloc/failure_handling.rs
new file mode 100644
index 0000000000000..fc3d765bc5b92
--- /dev/null
+++ b/library/alloc/src/alloc/failure_handling.rs
@@ -0,0 +1,39 @@
+//! TBD
+//!
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+/// Describes the handling behavior in case of allocation failure.
+pub trait FailureHandling: sealed::Sealed + Send + Sync + Unpin {
+ /// The type returned by allocating functions.
+ ///
+ /// `Fallible` functions will return `Result<T, E>`,
+ /// but `Fatal` functions will return `T`.
+ type Result<T, E>;
+}
+
+/// Handle allocation failure globally by panicking / aborting.
+#[derive(Debug)]
+pub struct Fatal;
+
+impl sealed::Sealed for Fatal {}
+impl FailureHandling for Fatal {
+ type Result<T, E> = T;
+}
+
+/// Handle allocation failure fallibly, by returning a `Result`.
+#[derive(Debug)]
+pub struct Fallible;
+
+impl sealed::Sealed for Fallible {}
+impl FailureHandling for Fallible {
+ type Result<T, E> = Result<T, E>;
+}
+
+/// The default `FailureHandling` type parameter for use in containers.
+#[cfg(not(no_global_oom_handling))]
+pub type DefaultFailureHandling = Fatal;
+#[cfg(no_global_oom_handling)]
+pub type DefaultFailureHandling = Fallible;
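The `Result<T, E>` GAT above is the hinge of the design: a single trait maps each failure-handling mode to a return convention. A standalone sketch of that pattern as reconstructed here (every name below is local to the sketch, not an export of the patch):

trait FailureHandling {
    type Result<T, E>;
}

struct Fatal;
struct Fallible;

impl FailureHandling for Fatal {
    // Failure is handled globally (panic/abort), so callers only
    // ever see the success value.
    type Result<T, E> = T;
}

impl FailureHandling for Fallible {
    // Failure is surfaced to the caller as the error variant.
    type Result<T, E> = Result<T, E>;
}

// What a `reserve`-style signature would resolve to per mode.
struct AllocFailed;
type ReserveResult<FH> = <FH as FailureHandling>::Result<(), AllocFailed>;

fn main() {
    // The same alias names two different concrete types:
    let _fatal: ReserveResult<Fatal> = ();
    let _fallible: ReserveResult<Fallible> = Ok(());
}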
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 25c63b425ce59..6af4174e8492d 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -165,6 +165,8 @@ use core::pin::Pin;
use core::ptr::{self, NonNull, Unique};
use core::task::{Context, Poll};
+use crate::alloc::failure_handling::Fallible;
+
#[cfg(not(no_global_oom_handling))]
use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
use crate::alloc::{AllocError, Allocator, Global, Layout};
@@ -692,7 +694,7 @@ impl<T> Box<[T]> {
};
Global.allocate(layout)?.cast()
};
- unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
+ unsafe { Ok(RawVec::<_, _, Fallible>::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
/// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -726,7 +728,7 @@ impl<T> Box<[T]> {
};
Global.allocate_zeroed(layout)?.cast()
};
- unsafe { Ok(RawVec::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
+ unsafe { Ok(RawVec::<_, _, Fallible>::from_raw_parts_in(ptr.as_ptr(), len, Global).into_box(len)) }
}
}
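Note the `RawVec::<_, _, Fallible>` turbofish in both hunks: once a trailing defaulted type parameter exists, callers can name just that parameter and let inference fill the rest. A minimal standalone analogue (the types here are illustrative only, not from the patch):

use std::marker::PhantomData;

// A container with a defaulted "mode" parameter, like `RawVec<T, A, FH>`.
struct Buffer<T, M = ()> {
    items: Vec<T>,
    _mode: PhantomData<M>,
}

struct Checked;

impl<T, M> Buffer<T, M> {
    fn from_items(items: Vec<T>) -> Self {
        Buffer { items, _mode: PhantomData }
    }
}

fn main() {
    // `_` lets the compiler infer `T`; only the mode is spelled out,
    // mirroring `RawVec::<_, _, Fallible>::from_raw_parts_in(...)`.
    let buf = Buffer::<_, Checked>::from_items(vec![1u8, 2, 3]);
    assert_eq!(buf.items.len(), 3);
}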
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4ef8af9b03475..992560d5fdb1f 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -22,6 +22,7 @@ use core::slice;
#[allow(unused_imports)]
use core::mem;
+use crate::alloc::failure_handling::Fatal;
use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
@@ -102,7 +103,7 @@ pub struct VecDeque<
// if `len == 0`, the exact value of `head` is unimportant.
// if `T` is zero-Sized, then `self.len <= usize::MAX`, otherwise `self.len <= isize::MAX as usize`.
len: usize,
- buf: RawVec<T, A>,
+ buf: RawVec<T, A, Fatal>,
}
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 817b93720ce28..40e417f48259f 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -3,10 +3,12 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
+use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;
+use crate::alloc::failure_handling::DefaultFailureHandling;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
@@ -14,6 +16,8 @@ use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
+use crate::alloc::failure_handling::{FailureHandling, Fatal, Fallible};
+
#[cfg(test)]
mod tests;
@@ -48,13 +52,14 @@ enum AllocInit {
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
-pub(crate) struct RawVec<T, A: Allocator = Global> {
+pub(crate) struct RawVec<T, A: Allocator = Global, FH: FailureHandling = DefaultFailureHandling> {
ptr: Unique<T>,
cap: usize,
alloc: A,
+ _fh: PhantomData<FH>,
}
-impl<T> RawVec<T, Global> {
+impl<T, FH: FailureHandling> RawVec<T, Global, FH> {
/// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
/// they cannot call `Self::new()`.
///
@@ -101,7 +106,7 @@ impl<T> RawVec<T, Global> {
}
}
-impl<T, A: Allocator> RawVec<T, A> {
+impl<T, A: Allocator, FH: FailureHandling> RawVec<T, A, FH> {
// Tiny Vecs are dumb. Skip to:
// - 8 if the element size is 1, because any heap allocators is likely
// to round up a request of less than 8 bytes to at least 8 bytes.
@@ -119,23 +124,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: 0, alloc }
- }
-
- /// Like `with_capacity`, but parameterized over the choice of
- /// allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
- }
-
- /// Like `with_capacity_zeroed`, but parameterized over the choice
- /// of allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
- #[inline]
- pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
- Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ Self { ptr: Unique::dangling(), cap: 0, alloc, _fh: PhantomData }
}
/// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
@@ -164,42 +153,6 @@ impl<T, A: Allocator> RawVec<T, A> {
}
}
- #[cfg(not(no_global_oom_handling))]
- fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
- // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
- if T::IS_ZST || capacity == 0 {
- Self::new_in(alloc)
- } else {
- // We avoid `unwrap_or_else` here because it bloats the amount of
- // LLVM IR generated.
- let layout = match Layout::array::<T>(capacity) {
- Ok(layout) => layout,
- Err(_) => capacity_overflow(),
- };
- match alloc_guard(layout.size()) {
- Ok(_) => {}
- Err(_) => capacity_overflow(),
- }
- let result = match init {
- AllocInit::Uninitialized => alloc.allocate(layout),
- AllocInit::Zeroed => alloc.allocate_zeroed(layout),
- };
- let ptr = match result {
- Ok(ptr) => ptr,
- Err(_) => handle_alloc_error(layout),
- };
-
- // Allocators currently return a `NonNull<[u8]>` whose length
- // matches the size requested. If that ever changes, the capacity
- // here should change to `ptr.len() / mem::size_of::<T>()`.
- Self {
- ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
- alloc,
- }
- }
- }
-
/// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
///
/// # Safety
@@ -212,7 +165,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc, _fh: PhantomData }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@@ -253,6 +206,61 @@ impl<T, A: Allocator> RawVec<T, A> {
}
}
}
+}
+
+impl<T, A: Allocator> RawVec<T, A, Fatal> {
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `with_capacity_zeroed`, but parameterized over the choice
+ /// of allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+ if T::IS_ZST || capacity == 0 {
+ Self::new_in(alloc)
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => capacity_overflow(),
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => capacity_overflow(),
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => handle_alloc_error(layout),
+ };
+
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ _fh: PhantomData,
+ }
+ }
+ }
/// Ensures that the buffer contains at least enough space to hold `len +
/// additional` elements. If it doesn't already have enough capacity, will
@@ -282,7 +290,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// inlined as just a comparison and a call if the comparison fails.
#[cold]
fn do_reserve_and_handle<T, A: Allocator>(
- slf: &mut RawVec<T, A>,
+ slf: &mut RawVec<T, A, Fatal>,
len: usize,
additional: usize,
) {
@@ -368,7 +376,124 @@ impl<T, A: Allocator> RawVec<T, A> {
}
}
-impl<T, A: Allocator> RawVec<T, A> {
+impl<T, A: Allocator> RawVec<T, A, Fallible> {
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[inline]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ // /// Like `with_capacity_zeroed`, but parameterized over the choice
+ // /// of allocator for the returned `RawVec`.
+ // #[inline]
+ // pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ // Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ // }
+
+ fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+
+ use crate::collections::TryReserveErrorKind;
+ if T::IS_ZST || capacity == 0 {
+ return Ok(Self::new_in(alloc));
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => Err(TryReserveErrorKind::CapacityOverflow)?,
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => Err(TryReserveErrorKind::CapacityOverflow)?,
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => Err(TryReserveErrorKind::AllocError { layout, non_exhaustive: () })?,
+ };
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Ok(Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ _fh: PhantomData,
+ })
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already have enough capacity, will
+ /// reallocate enough space plus comfortable slack space to get amortized
+ /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
+ /// itself to panic.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behavior of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ #[inline]
+ pub fn reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ // Callers expect this function to be very cheap when there is already sufficient capacity.
+ // Therefore, we move all the resizing and error-handling logic from grow_amortized and
+ // handle_reserve behind a call, while making sure that this function is likely to be
+ // inlined as just a comparison and a call if the comparison fails.
+ #[cold]
+ fn do_reserve_and_handle<T, A: Allocator>(
+ slf: &mut RawVec<T, A, Fallible>,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ slf.grow_amortized(len, additional)
+ }
+
+ if self.needs_to_grow(len, additional) {
+ return do_reserve_and_handle(self, len, additional);
+ }
+
+ return Ok(());
+ }
+
+ /// A specialized version of `reserve()` used only by the hot and
+ /// oft-instantiated `Vec::push()`, which does its own capacity check.
+ #[inline(never)]
+ pub fn reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
+ self.grow_amortized(len, 1)
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already, will reallocate the
+ /// minimum possible amount of memory necessary. Generally this will be
+ /// exactly the amount of memory necessary, but in principle the allocator
+ /// is free to give back more than we asked for.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe code
+ /// *you* write that relies on the behavior of this function may break.
+ pub fn reserve_exact(
+ &mut self,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ }
+
+ /// Shrinks the buffer down to the specified capacity. If the given amount
+ /// is 0, actually completely deallocates.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn shrink_to_fit(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ self.shrink(cap)
+ }
+}
+
+impl<T, A: Allocator, FH: FailureHandling> RawVec<T, A, FH> {
/// Returns if the buffer needs to grow to fulfill the needed extra capacity.
/// Mainly used to make inlining reserve-calls possible without inlining `grow`.
fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
@@ -498,7 +623,7 @@ where
memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
-unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator, FH: FailureHandling> Drop for RawVec<T, A, FH> {
/// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
fn drop(&mut self) {
if let Some((ptr, layout)) = self.current_memory() {
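`try_allocate_in` above maps the two pre-allocation failures to `CapacityOverflow` and a refused allocation to `AllocError { layout, .. }`. The pre-allocation half can be sketched with stable `std` items alone; the local `ReserveError` stands in for `TryReserveErrorKind`, and the allocator-refusal arm is omitted since it needs a live allocator:

use std::alloc::Layout;

// Stand-in for the patch's `TryReserveErrorKind` (local to this sketch).
#[derive(Debug)]
enum ReserveError {
    CapacityOverflow,
}

fn layout_for<T>(capacity: usize) -> Result<Layout, ReserveError> {
    // `Layout::array::<T>(capacity)` fails on arithmetic overflow; the patch
    // maps that to an error instead of calling `capacity_overflow()`.
    let layout = Layout::array::<T>(capacity).map_err(|_| ReserveError::CapacityOverflow)?;
    // Mirror of `alloc_guard`: sizes above `isize::MAX` are also a capacity
    // overflow (the check only matters on sub-64-bit targets).
    if usize::BITS < 64 && layout.size() > isize::MAX as usize {
        return Err(ReserveError::CapacityOverflow);
    }
    Ok(layout)
}

fn main() {
    assert!(layout_for::<u64>(usize::MAX).is_err()); // overflow path
    assert!(layout_for::<u64>(16).is_ok()); // happy path
}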
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index dd7876bed7691..fc60ea679903b 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -241,6 +241,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
+use crate::alloc::failure_handling::Fallible;
#[cfg(not(test))]
use crate::boxed::Box;
#[cfg(test)]
@@ -2528,7 +2529,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for Rc<[T], A> {
// Create a `Vec<T>` with length 0, to deallocate the buffer
// without dropping its contents or the allocator
- let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
+ let _ = Vec::<_, _, Fallible>::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
Self::from_ptr_in(rc_ptr, alloc)
}
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index aa3b7b7e1914b..5b2d65d2291d5 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -91,13 +91,14 @@ pub use hack::to_vec;
pub(crate) mod hack {
use core::alloc::Allocator;
+ use crate::alloc::failure_handling::Fatal;
use crate::boxed::Box;
use crate::vec::Vec;
// We shouldn't add inline attribute to this since this is used in
// `vec!` macro mostly and causes perf regression. See #71204 for
// discussion and perf results.
- pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
+ pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A, Fatal> {
unsafe {
let len = b.len();
let (b, alloc) = Box::into_raw_with_allocator(b);
@@ -107,13 +108,13 @@ pub(crate) mod hack {
#[cfg(not(no_global_oom_handling))]
#[inline]
- pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
+ pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A, Fatal> {
T::to_vec(s, alloc)
}
#[cfg(not(no_global_oom_handling))]
pub trait ConvertVec {
- fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A, Fatal>
where
Self: Sized;
}
@@ -121,9 +122,9 @@ pub(crate) mod hack {
#[cfg(not(no_global_oom_handling))]
impl<T: Clone> ConvertVec for T {
#[inline]
- default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A, Fatal> {
struct DropGuard<'a, T, A: Allocator> {
- vec: &'a mut Vec<T, A>,
+ vec: &'a mut Vec<T, A, Fatal>,
num_init: usize,
}
impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
@@ -158,7 +159,7 @@ pub(crate) mod hack {
#[cfg(not(no_global_oom_handling))]
impl<T: Copy> ConvertVec for T {
#[inline]
- fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A, Fatal> {
let mut v = Vec::with_capacity_in(s.len(), alloc);
// SAFETY:
// allocated above with the capacity of `s`, and initialize to `s.len()` in
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 351e6c1a4b3d2..55d00b21950e7 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -30,6 +30,7 @@ use core::slice::from_raw_parts_mut;
use core::sync::atomic;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+use crate::alloc::failure_handling::Fallible;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
#[cfg(not(no_global_oom_handling))]
@@ -3390,7 +3391,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for Arc<[T], A> {
// Create a `Vec<T>` with length 0, to deallocate the buffer
// without dropping its contents or the allocator
- let _ = Vec::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
+ let _ = Vec::<_, _, Fallible>::from_raw_parts_in(vec_ptr, 0, cap, &alloc);
Self::from_ptr_in(rc_ptr, alloc)
}
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
index f0b63759ac70f..4a1eb01f29d09 100644
--- a/library/alloc/src/vec/drain.rs
+++ b/library/alloc/src/vec/drain.rs
@@ -1,3 +1,4 @@
+use crate::alloc::failure_handling::{FailureHandling, DefaultFailureHandling};
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
@@ -23,6 +24,7 @@ pub struct Drain<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+ #[unstable(feature = "allocator_api", issue = "32838")] FH: FailureHandling = DefaultFailureHandling,
> {
/// Index of tail to preserve
pub(super) tail_start: usize,
@@ -30,17 +32,17 @@ pub struct Drain<
pub(super) tail_len: usize,
/// Current remaining range to remove
pub(super) iter: slice::Iter<'a, T>,
- pub(super) vec: NonNull<Vec<T, A>>,
+ pub(super) vec: NonNull<Vec<T, A, FH>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+impl<T: fmt::Debug, A: Allocator, FH: FailureHandling> fmt::Debug for Drain<'_, T, A, FH> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
}
}
-impl<'a, T, A: Allocator> Drain<'a, T, A> {
+impl<'a, T, A: Allocator, FH: FailureHandling> Drain<'a, T, A, FH> {
/// Returns the remaining items of this iterator as a slice.
///
/// # Examples
@@ -137,19 +139,19 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
-impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+impl<'a, T, A: Allocator, FH: FailureHandling> AsRef<[T]> for Drain<'a, T, A, FH> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+unsafe impl<T: Sync, A: Sync + Allocator, FH: FailureHandling> Sync for Drain<'_, T, A, FH> {}
#[stable(feature = "drain", since = "1.6.0")]
-unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+unsafe impl<T: Send, A: Send + Allocator, FH: FailureHandling> Send for Drain<'_, T, A, FH> {}
#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+impl<T, A: Allocator, FH: FailureHandling> Iterator for Drain<'_, T, A, FH> {
type Item = T;
#[inline]
@@ -163,7 +165,7 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
}
#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+impl<T, A: Allocator, FH: FailureHandling> DoubleEndedIterator for Drain<'_, T, A, FH> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
@@ -171,12 +173,12 @@ impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
}
#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+impl<T, A: Allocator, FH: FailureHandling> Drop for Drain<'_, T, A, FH> {
fn drop(&mut self) {
/// Moves back the un-`Drain`ed elements to restore the original `Vec`.
- struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+ struct DropGuard<'r, 'a, T, A: Allocator, FH: FailureHandling>(&'r mut Drain<'a, T, A, FH>);
- impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ impl<'r, 'a, T, A: Allocator, FH: FailureHandling> Drop for DropGuard<'r, 'a, T, A, FH> {
fn drop(&mut self) {
if self.0.tail_len > 0 {
unsafe {
@@ -240,14 +242,14 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
#[stable(feature = "drain", since = "1.6.0")]
-impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+impl<T, A: Allocator, FH: FailureHandling> ExactSizeIterator for Drain<'_, T, A, FH> {
fn is_empty(&self) -> bool {
self.iter.is_empty()
}
}
#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
+unsafe impl<T, A: Allocator, FH: FailureHandling> TrustedLen for Drain<'_, T, A, FH> {}
#[stable(feature = "fused", since = "1.26.0")]
-impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
+impl<T, A: Allocator, FH: FailureHandling> FusedIterator for Drain<'_, T, A, FH> {}
diff --git a/library/alloc/src/vec/extract_if.rs b/library/alloc/src/vec/extract_if.rs
index 118cfdb36b9c2..01460070d644d 100644
--- a/library/alloc/src/vec/extract_if.rs
+++ b/library/alloc/src/vec/extract_if.rs
@@ -1,3 +1,4 @@
+use crate::alloc::failure_handling::{FailureHandling, DefaultFailureHandling};
use crate::alloc::{Allocator, Global};
use core::ptr;
use core::slice;
@@ -25,10 +26,11 @@ pub struct ExtractIf<
T,
F,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+ #[unstable(feature = "allocator_api", issue = "32838")] FH: FailureHandling = DefaultFailureHandling,
> where
F: FnMut(&mut T) -> bool,
{
- pub(super) vec: &'a mut Vec<T, A>,
+ pub(super) vec: &'a mut Vec<T, A, FH>,
/// The index of the item that will be inspected by the next call to `next`.
pub(super) idx: usize,
/// The number of items that have been drained (removed) thus far.
@@ -39,7 +41,7 @@ pub struct ExtractIf<
pub(super) pred: F,
}
-impl<T, F, A: Allocator> ExtractIf<'_, T, F, A>
+impl<T, F, A: Allocator, FH: FailureHandling> ExtractIf<'_, T, F, A, FH>
where
F: FnMut(&mut T) -> bool,
{
@@ -52,7 +54,7 @@ where
}
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+impl<T, F, A: Allocator, FH: FailureHandling> Iterator for ExtractIf<'_, T, F, A, FH>
where
F: FnMut(&mut T) -> bool,
{
@@ -88,7 +90,7 @@ where
}
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
-impl<T, F, A: Allocator> Drop for ExtractIf<'_, T, F, A>
+impl<T, F, A: Allocator, FH: FailureHandling> Drop for ExtractIf<'_, T, F, A, FH>
where
F: FnMut(&mut T) -> bool,
{
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index b2db2fdfd18f1..f7b153dcdd94e 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -1,5 +1,6 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
+use crate::alloc::failure_handling::Fallible;
use crate::alloc::{Allocator, Global};
#[cfg(not(no_global_oom_handling))]
use crate::collections::VecDeque;
@@ -356,7 +357,7 @@ where
/// assert_eq!(iter.as_slice(), &[]);
/// ```
fn default() -> Self {
- super::Vec::new_in(Default::default()).into_iter()
+ super::Vec::<_, _, Fallible>::new_in(Default::default()).into_iter()
}
}
@@ -405,7 +406,7 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
// `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
let alloc = ManuallyDrop::take(&mut self.0.alloc);
// RawVec handles deallocation
- let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
+ let _ = RawVec::<_, _, Fallible>::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
}
}
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 6c78d65f1c943..b64129e97def7 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -65,10 +65,11 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
+use crate::alloc::failure_handling::{FailureHandling, DefaultFailureHandling, Fatal, Fallible};
use crate::alloc::{Allocator, Global};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
-use crate::collections::TryReserveError;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
use crate::raw_vec::RawVec;
#[unstable(feature = "extract_if", reason = "recently added", issue = "43244")]
@@ -144,6 +145,10 @@ use self::spec_extend::SpecExtend;
#[cfg(not(no_global_oom_handling))]
mod spec_extend;
+use self::try_spec_extend::TrySpecExtend;
+
+mod try_spec_extend;
+
/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
///
/// # Examples
@@ -393,8 +398,12 @@ mod spec_extend;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Vec")]
#[rustc_insignificant_dtor]
-pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> {
- buf: RawVec<T, A>,
+pub struct Vec<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+ #[unstable(feature = "allocator_api", issue = "32838")] FH: FailureHandling = DefaultFailureHandling,
+> {
+ buf: RawVec<T, A, FH>,
len: usize,
}
@@ -587,7 +596,7 @@ impl<T> Vec<T> {
}
}
-impl<T, A: Allocator> Vec<T, A> {
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Constructs a new, empty `Vec<T, A>`.
///
/// The vector will not allocate until elements are pushed onto it.
@@ -607,7 +616,8 @@ impl<T, A: Allocator> Vec<T, A> {
pub const fn new_in(alloc: A) -> Self {
Vec { buf: RawVec::new_in(alloc), len: 0 }
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
/// with the provided allocator.
///
@@ -669,7 +679,18 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Like the infallible `with_capacity_in`, but returns `Err` instead of
+ /// panicking when the allocation size exceeds `isize::MAX`, and instead
+ /// of aborting on allocation failure.
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
/// and an allocator.
///
@@ -885,7 +906,8 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn capacity(&self) -> usize {
self.buf.capacity()
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Reserves capacity for at least `additional` more elements to be inserted
/// in the given `Vec<T>`. The collection may reserve more space to
/// speculatively avoid frequent reallocations. After calling `reserve`,
@@ -1106,7 +1128,56 @@ impl<T, A: Allocator> Vec<T, A> {
buf.into_box(len).assume_init()
}
}
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Identical to `try_reserve`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.reserve(self.len, additional)
+ }
+
+ /// Identical to `try_reserve_exact`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.reserve_exact(self.len, additional)
+ }
+
+ /// Fallible version of `shrink_to_fit`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn shrink_to_fit(&mut self) -> Result<(), TryReserveError> {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ return self.buf.shrink_to_fit(self.len);
+ }
+
+ Ok(())
+ }
+
+ /// Fallible version of `shrink_to`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn shrink_to(&mut self, min_capacity: usize) -> Result<(), TryReserveError> {
+ if self.capacity() > min_capacity {
+ return self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+ }
+
+ Ok(())
+ }
+ /// Fallible version of `into_boxed_slice`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn into_boxed_slice(mut self) -> Result<Box<[T], A>, TryReserveError> {
+ unsafe {
+ self.shrink_to_fit()?;
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ Ok(buf.into_box(len).assume_init())
+ }
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Shortens the vector, keeping the first `len` elements and dropping
/// the rest.
///
@@ -1468,7 +1539,8 @@ impl<T, A: Allocator> Vec<T, A> {
value
}
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Inserts an element at position `index` within the vector, shifting all
/// elements after it to the right.
///
@@ -1523,7 +1595,52 @@ impl<T, A: Allocator> Vec<T, A> {
self.set_len(len + 1);
}
}
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Fallible version of `insert`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn insert(&mut self, index: usize, element: T) -> Result<(), (TryReserveError, T)> {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("insertion index (is {index}) should be <= len (is {len})");
+ }
+
+ let len = self.len();
+
+ // space for the new element
+ if len == self.buf.capacity() {
+ match self.reserve(1) {
+ Err(e) => return Err((e, element)),
+ Ok(_) => (),
+ }
+ }
+
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ if index < len {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.add(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ assert_failed(index, len);
+ }
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
+ }
+ self.set_len(len + 1);
+ }
+ Ok(())
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Removes and returns the element at position `index` within the vector,
/// shifting all elements after it to the left.
///
@@ -1650,14 +1767,14 @@ impl<T, A: Allocator> Vec<T, A> {
// This drop guard will be invoked when predicate or `drop` of element panicked.
// It shifts unchecked elements to cover holes and `set_len` to the correct length.
// In cases when predicate and `drop` never panick, it will be optimized out.
- struct BackshiftOnDrop<'a, T, A: Allocator> {
- v: &'a mut Vec<T, A>,
+ struct BackshiftOnDrop<'a, T, A: Allocator, FH: FailureHandling> {
+ v: &'a mut Vec<T, A, FH>,
processed_len: usize,
deleted_cnt: usize,
original_len: usize,
}
- impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
+ impl<T, A: Allocator, FH: FailureHandling> Drop for BackshiftOnDrop<'_, T, A, FH> {
fn drop(&mut self) {
if self.deleted_cnt > 0 {
// SAFETY: Trailing unchecked items must be valid since we never touch them.
@@ -1678,10 +1795,10 @@ impl<T, A: Allocator> Vec<T, A> {
let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };
- fn process_loop<F, T, A: Allocator, const DELETED: bool>(
+ fn process_loop<F, T, A: Allocator, FH: FailureHandling, const DELETED: bool>(
original_len: usize,
f: &mut F,
- g: &mut BackshiftOnDrop<'_, T, A>,
+ g: &mut BackshiftOnDrop<'_, T, A, FH>,
) where
F: FnMut(&mut T) -> bool,
{
@@ -1714,10 +1831,10 @@ impl<T, A: Allocator> Vec<T, A> {
}
// Stage 1: Nothing was deleted.
- process_loop::<F, T, A, false>(original_len, &mut f, &mut g);
+ process_loop::<F, T, A, FH, false>(original_len, &mut f, &mut g);
// Stage 2: Some elements were deleted.
- process_loop::<F, T, A, true>(original_len, &mut f, &mut g);
+ process_loop::<F, T, A, FH, true>(original_len, &mut f, &mut g);
// All item are processed. This can be optimized to `set_len` by LLVM.
drop(g);
@@ -1776,7 +1893,7 @@ impl<T, A: Allocator> Vec<T, A> {
}
/* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
- struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+ struct FillGapOnDrop<'a, T, A: Allocator, FH: FailureHandling> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1785,10 +1902,10 @@ impl<T, A: Allocator> Vec<T, A> {
write: usize,
/* The Vec that would need correction if `same_bucket` panicked */
- vec: &'a mut Vec<T, A>,
+ vec: &'a mut Vec<T, A, FH>,
}
- impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+ impl<'a, T, A: Allocator, FH: FailureHandling> Drop for FillGapOnDrop<'a, T, A, FH> {
fn drop(&mut self) {
/* This code gets executed when `same_bucket` panics */
@@ -1860,7 +1977,8 @@ impl<T, A: Allocator> Vec<T, A> {
mem::forget(gap);
}
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Appends an element to the back of a collection.
///
/// # Panics
@@ -1889,7 +2007,30 @@ impl<T, A: Allocator> Vec<T, A> {
self.len += 1;
}
}
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Fallible version of `push`
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn push(&mut self, value: T) -> Result<(), (TryReserveError, T)> {
+ // This will return an error if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ match self.buf.reserve_for_push(self.len) {
+ Err(e) => return Err((e, value)),
+ Ok(_) => (),
+ }
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Appends an element if there is sufficient spare capacity, otherwise an error is returned
/// with the element.
///
@@ -1963,7 +2104,8 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Moves all the elements of `other` into `self`, leaving `other` empty.
///
/// # Panics
@@ -1999,7 +2141,33 @@ impl<T, A: Allocator> Vec<T, A> {
unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
self.len += count;
}
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Fallible version of `append`
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn append(&mut self, other: &mut Self) -> Result<(), TryReserveError> {
+ unsafe {
+ self.append_elements(other.as_slice() as _)?;
+ other.set_len(0);
+ }
+
+ Ok(())
+ }
+
+ /// Appends elements to `self` from other buffer.
+ #[inline]
+ unsafe fn append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
+ let count = unsafe { (*other).len() };
+ self.reserve(count)?;
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ Ok(())
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Removes the specified range from the vector in bulk, returning all
/// removed elements as an iterator. If the iterator is dropped before
/// being fully consumed, it drops the remaining removed elements.
@@ -2031,7 +2199,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(v, &[]);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
- pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A, FH>
where
R: RangeBounds<usize>,
{
@@ -2052,7 +2220,7 @@ impl<T, A: Allocator> Vec<T, A> {
// set self.vec length's to start, to be safe in case Drain is leaked
self.set_len(start);
let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
- Drain {
+ Drain::<'_, _, _, FH> {
tail_start: end,
tail_len: len - end,
iter: range_slice.iter(),
@@ -2122,7 +2290,8 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn is_empty(&self) -> bool {
self.len() == 0
}
-
+}
+impl<T, A: Allocator> Vec<T, A, Fatal> {
/// Splits the collection into two at the given index.
///
/// Returns a newly allocated vector containing the elements in the range
@@ -2220,7 +2389,64 @@ impl<T, A: Allocator> Vec<T, A> {
self.truncate(new_len);
}
}
+}
+impl<T, A: Allocator> Vec<T, A, Fallible> {
+ /// Fallible version of `split_off`
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn split_off(&mut self, at: usize) -> Result<Self, TryReserveError>
+ where
+ A: Clone,
+ {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {at}) should be <= len (is {len})");
+ }
+
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
+
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return Ok(mem::replace(
+ self,
+ Vec::try_with_capacity_in(self.capacity(), self.allocator().clone())?,
+ ));
+ }
+
+ let other_len = self.len - at;
+ let mut other = Vec::try_with_capacity_in(other_len, self.allocator().clone())?;
+
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
+
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ Ok(other)
+ }
+
+ /// Fallible version of `resize_with`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F) -> Result<(), TryReserveError>
+ where
+ F: FnMut() -> T,
+ {
+ let len = self.len();
+ if new_len > len {
+ self.extend_trusted(iter::repeat_with(f).take(new_len - len))?;
+ } else {
+ self.truncate(new_len);
+ }
+ Ok(())
+ }
+}
+impl<T, A: Allocator, FH: FailureHandling> Vec<T, A, FH> {
/// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
/// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
/// `'a`. If the type has only static references, or none at all, then this
@@ -2383,7 +2609,7 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
-impl<T: Clone, A: Allocator> Vec<T, A> {
+impl<T: Clone, A: Allocator> Vec<T, A, Fatal> {
/// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
///
/// If `new_len` is greater than `len`, the `Vec` is extended by the
@@ -2481,8 +2707,47 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
}
}
}
+impl<T: Clone, A: Allocator> Vec<T, A, Fallible> {
+ /// Fallible version of `resize`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
+ let len = self.len();
-impl<T, const N: usize, A: Allocator> Vec<[T; N], A> {
+ if new_len > len {
+ return self.extend_with(new_len - len, value);
+ } else {
+ self.truncate(new_len);
+ }
+
+ Ok(())
+ }
+
+ /// Fallible version of `extend_from_slice`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
+ self.try_spec_extend(other.iter())
+ }
+
+ /// Fallible version of `extend_from_within`
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn extend_from_within<R>(&mut self, src: R) -> Result<(), TryReserveError>
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = slice::range(src, ..self.len());
+ self.reserve(range.len())?;
+
+ // SAFETY:
+ // - `slice::range` guarantees that the given range is valid for indexing self
+ unsafe {
+ self.spec_extend_from_within(range);
+ }
+
+ Ok(())
+ }
+}
+
+impl<T, const N: usize, A: Allocator, FH: FailureHandling> Vec<[T; N], A, FH> {
/// Takes a `Vec<[T; N]>` and flattens it into a `Vec<T>`.
///
/// # Panics
@@ -2527,7 +2792,7 @@ impl<T, const N: usize, A: Allocator> Vec<[T; N], A> {
}
}
-impl<T: Clone, A: Allocator> Vec<T, A> {
+impl<T: Clone, A: Allocator> Vec<T, A, Fatal> {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` clones of value.
fn extend_with(&mut self, n: usize, value: T) {
@@ -2558,8 +2823,40 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
}
}
}
+impl
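Taken together, the `Vec` changes select between the panicking and the `Result`-returning API through the third type parameter. A usage sketch written against this patch as reconstructed (it assumes a toolchain carrying the patch plus the re-export path below, and will not compile on stock Rust):

#![feature(allocator_api)]

use std::alloc::Global;
use std::collections::TryReserveError;
// Assumed re-export path for the marker type added by this patch:
use std::alloc::failure_handling::Fallible;

fn collect_squares(n: usize) -> Result<Vec<u32, Global, Fallible>, TryReserveError> {
    // `try_with_capacity_in` reports failure instead of aborting the process.
    let mut v = Vec::try_with_capacity_in(n, Global)?;
    for i in 0..n as u32 {
        // The fallible `push` hands the value back on failure, so the
        // caller can recover, retry, or drop it deliberately.
        v.push(i * i).map_err(|(e, _value)| e)?;
    }
    Ok(v)
}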