Commit 9d222e9

std: make ReentrantMutex movable and const; simplify Stdout initialization
1 parent 75b7e52 commit 9d222e9

File tree: 11 files changed, +60 -173 lines

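The commit title captures the mutex side of the change: once ReentrantMutex can be constructed in a const context and moved, a lock can live directly in a static instead of behind OnceLock and Pin. Below is a minimal standalone sketch of what a const lock constructor buys, using the public std::sync::Mutex as a stand-in for the internal ReentrantMutex (the names here are illustrative, not the std internals touched by this commit).

// Sketch only: std::sync::Mutex stands in for the internal ReentrantMutex.
use std::sync::Mutex;

// Without a const constructor, a lock in a static had to be created lazily at
// runtime, e.g. behind a OnceLock, and (for ReentrantMutex) pinned so it could
// never move again:
//
//     static LOG: OnceLock<Mutex<Vec<String>>> = OnceLock::new();
//
// With a const, movable constructor the static is built at compile time:
static LOG: Mutex<Vec<String>> = Mutex::new(Vec::new());

fn main() {
    LOG.lock().unwrap().push(String::from("hello"));
    assert_eq!(LOG.lock().unwrap().len(), 1);
}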

library/std/src/io/stdio.rs

+37 -36

@@ -5,10 +5,9 @@ mod tests;
 
 use crate::io::prelude::*;
 
-use crate::cell::{Cell, RefCell};
+use crate::cell::{Cell, RefCell, RefMut};
 use crate::fmt;
 use crate::io::{self, BufReader, IoSlice, IoSliceMut, LineWriter, Lines};
-use crate::pin::Pin;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::{Arc, Mutex, MutexGuard, OnceLock};
 use crate::sys::stdio;
@@ -526,7 +525,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     // stdout (tty or not). Note that if this is not line buffered it
     // should also flush-on-panic or some form of flush-on-abort.
-    inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
+    inner: &'static ReentrantMutex<RefCell<Option<LineWriter<StdoutRaw>>>>,
 }
 
 /// A locked reference to the [`Stdout`] handle.
@@ -548,10 +547,11 @@ pub struct Stdout {
 #[must_use = "if unused stdout will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StdoutLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
+    inner: ReentrantMutexGuard<'a, RefCell<Option<LineWriter<StdoutRaw>>>>,
 }
 
-static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
+static STDOUT: ReentrantMutex<RefCell<Option<LineWriter<StdoutRaw>>>> =
+    ReentrantMutex::new(RefCell::new(None));
 
 /// Constructs a new handle to the standard output of the current process.
 ///
@@ -602,25 +602,18 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLo
 #[must_use]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stdout() -> Stdout {
-    Stdout {
-        inner: Pin::static_ref(&STDOUT).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw()))) },
-            |mutex| unsafe { mutex.init() },
-        ),
-    }
+    Stdout { inner: &STDOUT }
 }
 
 pub fn cleanup() {
-    if let Some(instance) = STDOUT.get() {
-        // Flush the data and disable buffering during shutdown
-        // by replacing the line writer by one with zero
-        // buffering capacity.
-        // We use try_lock() instead of lock(), because someone
-        // might have leaked a StdoutLock, which would
-        // otherwise cause a deadlock here.
-        if let Some(lock) = Pin::static_ref(instance).try_lock() {
-            *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
-        }
+    // Flush the data and disable buffering during shutdown
+    // by replacing the line writer by one with zero
+    // buffering capacity.
+    // We use try_lock() instead of lock(), because someone
+    // might have leaked a StdoutLock, which would
+    // otherwise cause a deadlock here.
+    if let Some(lock) = STDOUT.try_lock() {
+        *lock.borrow_mut() = Some(LineWriter::with_capacity(0, stdout_raw()));
     }
 }
 
@@ -712,26 +705,38 @@ impl Write for &Stdout {
     }
 }
 
+impl StdoutLock<'_> {
+    #[inline]
+    fn inner(&self) -> RefMut<'_, LineWriter<StdoutRaw>> {
+        #[cold]
+        fn init() -> LineWriter<StdoutRaw> {
+            LineWriter::new(stdout_raw())
+        }
+
+        RefMut::map(self.inner.borrow_mut(), |w| w.get_or_insert_with(init))
+    }
+}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StdoutLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.inner.borrow_mut().write(buf)
+        self.inner().write(buf)
     }
     fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
-        self.inner.borrow_mut().write_vectored(bufs)
+        self.inner().write_vectored(bufs)
     }
     #[inline]
     fn is_write_vectored(&self) -> bool {
-        self.inner.borrow_mut().is_write_vectored()
+        self.inner().is_write_vectored()
     }
     fn flush(&mut self) -> io::Result<()> {
-        self.inner.borrow_mut().flush()
+        self.inner().flush()
     }
     fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
-        self.inner.borrow_mut().write_all(buf)
+        self.inner().write_all(buf)
     }
     fn write_all_vectored(&mut self, bufs: &mut [IoSlice<'_>]) -> io::Result<()> {
-        self.inner.borrow_mut().write_all_vectored(bufs)
+        self.inner().write_all_vectored(bufs)
     }
 }
 
@@ -761,7 +766,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// standard library or via raw Windows API calls, will fail.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
+    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
 }
 
 /// A locked reference to the [`Stderr`] handle.
@@ -834,16 +839,12 @@ pub struct StderrLock<'a> {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn stderr() -> Stderr {
     // Note that unlike `stdout()` we don't use `at_exit` here to register a
-    // destructor. Stderr is not buffered , so there's no need to run a
+    // destructor. Stderr is not buffered, so there's no need to run a
    // destructor for flushing the buffer
-    static INSTANCE: OnceLock<ReentrantMutex<RefCell<StderrRaw>>> = OnceLock::new();
+    static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
+        ReentrantMutex::new(RefCell::new(stderr_raw()));
 
-    Stderr {
-        inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
-            || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
-            |mutex| unsafe { mutex.init() },
-        ),
-    }
+    Stderr { inner: &INSTANCE }
 }
 
 impl Stderr {
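The stdout buffer itself still has to be created lazily: building the LineWriter allocates, so it presumably cannot happen in a const initializer. That is why the static now holds RefCell<Option<LineWriter<StdoutRaw>>> and StdoutLock::inner materializes the writer on first use (the #[cold] inner fn presumably keeps that rarely-taken path out of the hot write path). A rough standalone sketch of the same pattern follows; std::sync::Mutex and a LineWriter over stderr are stand-ins for the internal ReentrantMutex and StdoutRaw, and the names are illustrative, not std's.

// Standalone sketch of the Option-in-RefCell lazy pattern; Mutex and a
// LineWriter over stderr stand in for the std-internal ReentrantMutex and
// StdoutRaw.
use std::cell::{RefCell, RefMut};
use std::io::{self, LineWriter, Stderr, Write};
use std::sync::Mutex;

// Const-initialized static: no writer exists yet, so nothing allocates at
// program start.
static OUT: Mutex<RefCell<Option<LineWriter<Stderr>>>> = Mutex::new(RefCell::new(None));

fn with_out<R>(f: impl FnOnce(&mut LineWriter<Stderr>) -> R) -> R {
    let guard = OUT.lock().unwrap();
    // The first call creates the writer; later calls reuse it. RefMut::map
    // exposes a RefMut<LineWriter<...>> instead of RefMut<Option<...>>.
    let mut writer = RefMut::map(guard.borrow_mut(), |slot| {
        slot.get_or_insert_with(|| LineWriter::new(io::stderr()))
    });
    f(&mut *writer)
}

fn main() {
    with_out(|w| writeln!(w, "hello")).unwrap();
}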

library/std/src/sync/once_lock.rs

-55

@@ -3,7 +3,6 @@ use crate::fmt;
 use crate::marker::PhantomData;
 use crate::mem::MaybeUninit;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::pin::Pin;
 use crate::sync::Once;
 
 /// A synchronization primitive which can be written to only once.
@@ -223,60 +222,6 @@ impl<T> OnceLock<T> {
         Ok(unsafe { self.get_unchecked() })
     }
 
-    /// Internal-only API that gets the contents of the cell, initializing it
-    /// in two steps with `f` and `g` if the cell was empty.
-    ///
-    /// `f` is called to construct the value, which is then moved into the cell
-    /// and given as a (pinned) mutable reference to `g` to finish
-    /// initialization.
-    ///
-    /// This allows `g` to inspect an manipulate the value after it has been
-    /// moved into its final place in the cell, but before the cell is
-    /// considered initialized.
-    ///
-    /// # Panics
-    ///
-    /// If `f` or `g` panics, the panic is propagated to the caller, and the
-    /// cell remains uninitialized.
-    ///
-    /// With the current implementation, if `g` panics, the value from `f` will
-    /// not be dropped. This should probably be fixed if this is ever used for
-    /// a type where this matters.
-    ///
-    /// It is an error to reentrantly initialize the cell from `f`. The exact
-    /// outcome is unspecified. Current implementation deadlocks, but this may
-    /// be changed to a panic in the future.
-    pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
-    where
-        F: FnOnce() -> T,
-        G: FnOnce(Pin<&mut T>),
-    {
-        if let Some(value) = self.get_ref().get() {
-            // SAFETY: The inner value was already initialized, and will not be
-            // moved anymore.
-            return unsafe { Pin::new_unchecked(value) };
-        }
-
-        let slot = &self.value;
-
-        // Ignore poisoning from other threads
-        // If another thread panics, then we'll be able to run our closure
-        self.once.call_once_force(|_| {
-            let value = f();
-            // SAFETY: We use the Once (self.once) to guarantee unique access
-            // to the UnsafeCell (slot).
-            let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
-            // SAFETY: The value has been written to its final place in
-            // self.value. We do not to move it anymore, which we promise here
-            // with a Pin<&mut T>.
-            g(unsafe { Pin::new_unchecked(value) });
-        });
-
-        // SAFETY: The inner value has been initialized, and will not be moved
-        // anymore.
-        unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
-    }
-
     /// Consumes the `OnceLock`, returning the wrapped value. Returns
     /// `None` if the cell was empty.
     ///
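With ReentrantMutex no longer needing an in-place initialization step after being moved into the cell, the two-step get_or_init_pin helper has no remaining users and can be deleted, together with its caveats about leaking the value if the second closure panics. Single-step lazy initialization via the ordinary get_or_init is enough when a value cannot be built in a const context; a small sketch of that simpler pattern (the CONFIG value is purely illustrative, not a std internal):

// Sketch of the one-step pattern that remains sufficient once no post-move
// "fix-up" closure is required.
use std::sync::OnceLock;

static CONFIG: OnceLock<String> = OnceLock::new();

fn config() -> &'static str {
    // The closure runs at most once; every caller then sees the same value.
    CONFIG.get_or_init(|| String::from("default configuration")).as_str()
}

fn main() {
    assert_eq!(config(), "default configuration");
}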

library/std/src/sys/hermit/mutex.rs

-3

@@ -174,9 +174,6 @@ impl Mutex {
         Mutex { inner: Spinlock::new(MutexInner::new()) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         loop {

library/std/src/sys/itron/mutex.rs

-6

@@ -31,12 +31,6 @@ impl Mutex {
         Mutex { mtx: SpinIdOnceCell::new() }
     }
 
-    pub unsafe fn init(&mut self) {
-        // Initialize `self.mtx` eagerly
-        let id = new_mtx().unwrap_or_else(|e| fail(e, &"acre_mtx"));
-        unsafe { self.mtx.set_unchecked((id, ())) };
-    }
-
     /// Get the inner mutex's ID, which is lazily created.
     fn raw(&self) -> abi::ID {
         match self.mtx.get_or_try_init(|| new_mtx().map(|id| (id, ()))) {

library/std/src/sys/sgx/mutex.rs

-3

@@ -20,9 +20,6 @@ impl Mutex {
         Mutex { inner: SpinMutex::new(WaitVariable::new(false)) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         let mut guard = self.inner.lock();

library/std/src/sys/unix/locks/futex_mutex.rs

-3

@@ -19,9 +19,6 @@ impl Mutex {
         Self { futex: AtomicU32::new(0) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
         self.futex.compare_exchange(0, 1, Acquire, Relaxed).is_ok()

library/std/src/sys/unix/locks/pthread_mutex.rs

+1 -1

@@ -52,7 +52,7 @@ impl Mutex {
         Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
     }
     #[inline]
-    pub unsafe fn init(&mut self) {
+    unsafe fn init(&mut self) {
         // Issue #33770
         //
         // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
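On pthread platforms the one-time setup that works around issue #33770 is still needed, so here init only loses its pub visibility rather than disappearing; presumably it is now invoked from inside the platform module itself instead of through the cross-platform Mutex interface. The following is a hypothetical illustration of hiding such setup behind std::sync::Once, not the actual std code path.

// Hypothetical illustration only, not the real std implementation: one-time
// platform setup performed lazily inside lock() instead of through a public
// init() that every user of the cross-platform Mutex API must remember.
use std::sync::Once;

struct PlatformMutex {
    setup: Once,
    // a real implementation would hold the platform lock handle here
}

impl PlatformMutex {
    const fn new() -> Self {
        PlatformMutex { setup: Once::new() }
    }

    fn lock(&self) {
        // Runs the closure exactly once, even with concurrent callers.
        self.setup.call_once(|| {
            // platform-specific initialization would go here
        });
        // ...acquiring the lock would follow...
    }
}

fn main() {
    static M: PlatformMutex = PlatformMutex::new();
    M.lock();
}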

library/std/src/sys/unsupported/locks/mutex.rs

-3

@@ -16,9 +16,6 @@ impl Mutex {
         Mutex { locked: Cell::new(false) }
     }
 
-    #[inline]
-    pub unsafe fn init(&mut self) {}
-
     #[inline]
     pub unsafe fn lock(&self) {
         assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");

library/std/src/sys/windows/locks/mutex.rs

-2

@@ -37,8 +37,6 @@ impl Mutex {
     pub const fn new() -> Mutex {
         Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
     }
-    #[inline]
-    pub unsafe fn init(&mut self) {}
 
     #[inline]
     pub unsafe fn lock(&self) {
