From f2ac4ef085148d328abec0831c7caa77f460102d Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Wed, 16 Aug 2017 14:35:37 -0400 Subject: [PATCH 1/4] std::thread::LocalKeyState: Add state Initializing --- src/libstd/io/stdio.rs | 1 + src/libstd/sys/unix/fast_thread_local.rs | 16 - src/libstd/thread/local.rs | 450 ++++++++++++++++------- src/test/run-pass/tls-init-on-init.rs | 12 +- 4 files changed, 319 insertions(+), 160 deletions(-) diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index fb489bf487b8b..df8d132e1230b 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -676,6 +676,7 @@ fn print_to(args: fmt::Arguments, label: &str) where T: Write { let result = match local_s.state() { LocalKeyState::Uninitialized | + LocalKeyState::Initializing | LocalKeyState::Destroyed => global_s().write_fmt(args), LocalKeyState::Valid => { local_s.with(|s| { diff --git a/src/libstd/sys/unix/fast_thread_local.rs b/src/libstd/sys/unix/fast_thread_local.rs index 6cdbe5df75d51..99f4e720334b4 100644 --- a/src/libstd/sys/unix/fast_thread_local.rs +++ b/src/libstd/sys/unix/fast_thread_local.rs @@ -59,19 +59,3 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { // a more direct implementation. #[cfg(target_os = "fuchsia")] pub use sys_common::thread_local::register_dtor_fallback as register_dtor; - -pub fn requires_move_before_drop() -> bool { - // The macOS implementation of TLS apparently had an odd aspect to it - // where the pointer we have may be overwritten while this destructor - // is running. Specifically if a TLS destructor re-accesses TLS it may - // trigger a re-initialization of all TLS variables, paving over at - // least some destroyed ones with initial values. - // - // This means that if we drop a TLS value in place on macOS that we could - // revert the value to its original state halfway through the - // destructor, which would be bad! - // - // Hence, we use `ptr::read` on macOS (to move to a "safe" location) - // instead of drop_in_place. - cfg!(target_os = "macos") -} diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs index 02347bf4906a6..558065aa48817 100644 --- a/src/libstd/thread/local.rs +++ b/src/libstd/thread/local.rs @@ -12,9 +12,7 @@ #![unstable(feature = "thread_local_internals", issue = "0")] -use cell::UnsafeCell; use fmt; -use mem; /// A thread local storage key which owns its contents. /// @@ -87,7 +85,7 @@ pub struct LocalKey { // but actual data inside will sometimes be tagged with #[thread_local]. // It's not valid for a true static to reference a #[thread_local] static, // so we get around that by exposing an accessor through a layer of function - // indirection (this thunk). + // indirection (the 'get' thunk). // // Note that the thunk is itself unsafe because the returned lifetime of the // slot where data lives, `'static`, is not actually valid. The lifetime @@ -97,10 +95,34 @@ pub struct LocalKey { // trivially devirtualizable by LLVM because the value of `inner` never // changes and the constant should be readonly within a crate. This mainly // only runs into problems when TLS statics are exported across crates. - inner: unsafe fn() -> Option<&'static UnsafeCell>>, - // initialization routine to invoke to create a value + // The user-supplied initialization function that constructs a new T. init: fn() -> T, + + // Platform-specific callbacks. + + // Get a reference to the value. 
get can be called at any time during the + // value's life cycle (Uninitialized, Initializing, Valid, and Destroyed). + // If it returns Some, then the key value is in the Valid state. Otherwise, + // if is in one of the other three states (use the get_state callback to + // check which one). + get: unsafe fn() -> &'static Option, + // Query the value's current state. + get_state: unsafe fn() -> LocalKeyState, + // Begin initialization, moving the value into the Initializing state, and + // performing any platform-specific initialization. + // + // After pre_init has been called, it must be safe to call rollback_init + // and then pre_init an arbitrary number of times - if init panics, we + // call rollback_init to move back to the Uninitialized state, and we + // may try to initialize again in a future call to with or try_with. + pre_init: unsafe fn(), + // Finalize initialization, using the provided value as the initial value + // for this key. Move the value into the Initialized state. + post_init: unsafe fn(T), + // Roll back a failed initialization (caused by init panicking). Move the + // value back into the Uninitialized state. + rollback_init: unsafe fn(), } #[stable(feature = "std_debug", since = "1.16.0")] @@ -161,26 +183,34 @@ macro_rules! thread_local { macro_rules! __thread_local_inner { ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => { $(#[$attr])* $vis static $name: $crate::thread::LocalKey<$t> = { - fn __init() -> $t { $init } + #[thread_local] + #[cfg(target_thread_local)] + static __KEY: $crate::thread::__FastLocalKeyInner<$t> = + $crate::thread::__FastLocalKeyInner::new(); - unsafe fn __getit() -> $crate::option::Option< - &'static $crate::cell::UnsafeCell< - $crate::option::Option<$t>>> - { - #[thread_local] - #[cfg(target_thread_local)] - static __KEY: $crate::thread::__FastLocalKeyInner<$t> = - $crate::thread::__FastLocalKeyInner::new(); + #[cfg(not(target_thread_local))] + static __KEY: $crate::thread::__OsLocalKeyInner<$t> = + $crate::thread::__OsLocalKeyInner::new(); - #[cfg(not(target_thread_local))] - static __KEY: $crate::thread::__OsLocalKeyInner<$t> = - $crate::thread::__OsLocalKeyInner::new(); + unsafe fn __getit() -> &'static Option<$t> { __KEY.get() } - __KEY.get() - } + unsafe fn __get_state() -> $crate::thread::LocalKeyState { __KEY.get_state() } + + unsafe fn __pre_init() { __KEY.pre_init() } + + fn __init() -> $t { $init } + + unsafe fn __post_init(val: $t) { __KEY.post_init(val) } + + unsafe fn __rollback_init() { __KEY.rollback_init() } unsafe { - $crate::thread::LocalKey::new(__getit, __init) + $crate::thread::LocalKey::new(__getit, + __get_state, + __pre_init, + __init, + __post_init, + __rollback_init) } }; } @@ -192,9 +222,10 @@ macro_rules! __thread_local_inner { issue = "27716")] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum LocalKeyState { - /// All keys are in this state whenever a thread starts. Keys will - /// transition to the `Valid` state once the first call to [`with`] happens - /// and the initialization expression succeeds. + /// All keys are in this state whenever a thread starts. On the first call + /// to [`with`], keys are initialized - they are moved into the `Initializing` + /// state, then the initializer is called, and finally, if the initializer + /// succeeds (does not panic), they are moved into the `Valid` state. /// /// Keys in the `Uninitialized` state will yield a reference to the closure /// passed to [`with`] so long as the initialization routine does not panic. 
@@ -202,7 +233,23 @@ pub enum LocalKeyState { /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with Uninitialized, - /// Once a key has been accessed successfully, it will enter the `Valid` + /// On the first call to [`with`], the key is moved into this state. After + /// The initialization routine successfully completes (does not panic), + /// the key is moved into the `Valid` state (if it does panic, the key is + /// moved back into the `Uninitialized` state). Any code that may be called + /// from the initialization routine for a particular key - and that may + /// also access that key itself - should make sure that the key is not + /// currently being initialized by either querying the key's state or by + /// calling [`try_with`] instead of [`with`]. + /// + /// Keys in the `Initializing` state will trigger a panic when accessed via + /// [`with`]. + /// + /// [`try_with`]: ../../std/thread/struct.LocalKey.html#method.try_with + /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with + Initializing, + + /// Once a key has been initialized successfully, it will enter the `Valid` /// state. Keys in the `Valid` state will remain so until the thread exits, /// at which point the destructor will be run and the key will enter the /// `Destroyed` state. @@ -217,7 +264,7 @@ pub enum LocalKeyState { /// necessary). While a destructor is running, and possibly after a /// destructor has run, a key is in the `Destroyed` state. /// - /// Keys in the `Destroyed` states will trigger a panic when accessed via + /// Keys in the `Destroyed` state will trigger a panic when accessed via /// [`with`]. /// /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with @@ -255,13 +302,22 @@ impl LocalKey { #[unstable(feature = "thread_local_internals", reason = "recently added to create a key", issue = "0")] - pub const unsafe fn new(inner: unsafe fn() -> Option<&'static UnsafeCell>>, - init: fn() -> T) -> LocalKey { - LocalKey { - inner, - init, - } - } + pub const unsafe fn new(get: unsafe fn() -> &'static Option, + get_state: unsafe fn() -> LocalKeyState, + pre_init: unsafe fn(), + init: fn() -> T, + post_init: unsafe fn(T), + rollback_init: unsafe fn()) + -> LocalKey { + LocalKey { + get, + get_state, + pre_init, + init, + post_init, + rollback_init, + } + } /// Acquires a reference to the value in this TLS key. /// @@ -270,44 +326,56 @@ impl LocalKey { /// /// # Panics /// - /// This function will `panic!()` if the key currently has its - /// destructor running, and it **may** panic if the destructor has - /// previously been run for this thread. + /// This function will `panic!()` if the key is currently being initialized + /// or destructed, and it **may** panic if the destructor has previously + /// been run for this thread. #[stable(feature = "rust1", since = "1.0.0")] pub fn with(&'static self, f: F) -> R where F: FnOnce(&T) -> R { - self.try_with(f).expect("cannot access a TLS value during or \ - after it is destroyed") + self.try_with(f).expect("cannot access a TLS value while it is being initialized \ + or during or after it is destroyed") } - unsafe fn init(&self, slot: &UnsafeCell>) -> &T { - // Execute the initialization up front, *then* move it into our slot, - // just in case initialization fails. 
- let value = (self.init)(); - let ptr = slot.get(); + unsafe fn init(&self) { + struct RollbackOnPanic { + panicking: bool, + rollback: unsafe fn(), + } - // note that this can in theory just be `*ptr = Some(value)`, but due to - // the compiler will currently codegen that pattern with something like: - // - // ptr::drop_in_place(ptr) - // ptr::write(ptr, Some(value)) + impl Drop for RollbackOnPanic { + fn drop(&mut self) { + if self.panicking { + unsafe { (self.rollback)() } + } + } + } + + let mut reset = RollbackOnPanic { + panicking: true, + rollback: self.rollback_init, + }; + + // Transition into Valid, perform any pre-init work (e.g., registering + // destructors). If self.init panics, the Drop method of RollbackOnPanic + // will call self.rollback, rolling back the state to Uninitialized. // - // Due to this pattern it's possible for the destructor of the value in - // `ptr` (e.g. if this is being recursively initialized) to re-access - // TLS, in which case there will be a `&` and `&mut` pointer to the same - // value (an aliasing violation). To avoid setting the "I'm running a - // destructor" flag we just use `mem::replace` which should sequence the - // operations a little differently and make this safe to call. - mem::replace(&mut *ptr, Some(value)); - - (*ptr).as_ref().unwrap() + // Note that once self.pre_init has been called once, it must be safe to call + // self.rollback_init and then self.pre_init an arbitrary number of times - if + // self.init panics, a future call to with or try_with will again try to + // initialize the value, causing this routine to be run. + (self.pre_init)(); + let val = (self.init)(); + reset.panicking = false; + // Transition into Valid, set the value to val. + (self.post_init)(val); } /// Query the current state of this key. /// /// A key is initially in the `Uninitialized` state whenever a thread /// starts. It will remain in this state up until the first call to [`with`] - /// within a thread has run the initialization expression successfully. + /// or [`try_with`]. At this point, it will transition to the `Initializing` + /// state for the duration of the initialization. /// /// Once the initialization expression succeeds, the key transitions to the /// `Valid` state which will guarantee that future calls to [`with`] will @@ -321,33 +389,25 @@ impl LocalKey { /// /// Keys in the `Uninitialized` state can be accessed so long as the /// initialization does not panic. Keys in the `Valid` state are guaranteed - /// to be able to be accessed. Keys in the `Destroyed` state will panic on - /// any call to [`with`]. + /// to be able to be accessed. Keys in the `Initializing` or `Destroyed` + /// states will panic on any call to [`with`]. /// /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with + /// [`try_with`]: ../../std/thread/struct.LocalKey.html#method.try_with /// [`Copy`]: ../../std/marker/trait.Copy.html #[unstable(feature = "thread_local_state", reason = "state querying was recently added", issue = "27716")] pub fn state(&'static self) -> LocalKeyState { - unsafe { - match (self.inner)() { - Some(cell) => { - match *cell.get() { - Some(..) => LocalKeyState::Valid, - None => LocalKeyState::Uninitialized, - } - } - None => LocalKeyState::Destroyed, - } - } + unsafe { (self.get_state)() } } /// Acquires a reference to the value in this TLS key. /// /// This will lazily initialize the value if this thread has not referenced /// this key yet. 
If the key has been destroyed (which may happen if this is called - /// in a destructor), this function will return a ThreadLocalError. + /// in a destructor) or is being initialized (which may happen if this is called in + /// the key's initialization expression), this function will return a ThreadLocalError. /// /// # Panics /// @@ -359,13 +419,21 @@ impl LocalKey { pub fn try_with(&'static self, f: F) -> Result where F: FnOnce(&T) -> R { unsafe { - let slot = (self.inner)().ok_or(AccessError { - _private: (), - })?; - Ok(f(match *slot.get() { - Some(ref inner) => inner, - None => self.init(slot), - })) + if let &Some(ref inner) = (self.get)() { + Ok(f(inner)) + } else { + match self.state() { + LocalKeyState::Uninitialized => { + self.init(); + // Now that the value is initialized, we're guaranteed + // not to enter this else block in the recursive call. + self.try_with(f) + } + LocalKeyState::Initializing | + LocalKeyState::Destroyed => Err(AccessError { _private: ()}), + LocalKeyState::Valid => unreachable!(), + } + } } } } @@ -377,15 +445,18 @@ pub mod fast { use fmt; use mem; use ptr; - use sys::fast_thread_local::{register_dtor, requires_move_before_drop}; + use sys::fast_thread_local::register_dtor; + use thread::LocalKeyState; pub struct Key { inner: UnsafeCell>, + state: UnsafeCell, - // Metadata to keep track of the state of the destructor. Remember that - // these variables are thread-local, not global. + // Keep track of whether the destructor has been registered (this is + // not the same thing as not being in the Uninitialized state - we + // can transition back into that state in rollback_init). Remember + // that this variable is thread-local, not global. dtor_registered: Cell, - dtor_running: Cell, } impl fmt::Debug for Key { @@ -398,17 +469,44 @@ pub mod fast { pub const fn new() -> Key { Key { inner: UnsafeCell::new(None), + state: UnsafeCell::new(LocalKeyState::Uninitialized), dtor_registered: Cell::new(false), - dtor_running: Cell::new(false) } } - pub unsafe fn get(&self) -> Option<&'static UnsafeCell>> { - if mem::needs_drop::() && self.dtor_running.get() { - return None + pub fn get(&self) -> &'static Option { + unsafe { &*self.inner.get() } + } + + pub fn get_state(&self) -> LocalKeyState { + unsafe { *self.state.get() } + } + + pub fn pre_init(&self) { + unsafe { + // It's critical that we set the state to Initializing before + // registering destructors - if registering destructors causes + // allocation, and the global allocator uses TLS, then the + // allocator needs to be able to detect that the TLS is in + // the Initializing state and perform appropriate fallback + // logic rather than recursing infinitely. + *self.state.get() = LocalKeyState::Initializing; + self.register_dtor(); + } + } + + pub fn post_init(&self, val: T) { + unsafe { + *self.inner.get() = Some(val); + *self.state.get() = LocalKeyState::Valid; + } + } + + pub fn rollback_init(&self) { + unsafe { + *self.inner.get() = None; + *self.state.get() = LocalKeyState::Uninitialized; } - self.register_dtor(); - Some(&*(&self.inner as *const _)) } unsafe fn register_dtor(&self) { @@ -424,36 +522,35 @@ pub mod fast { unsafe extern fn destroy_value(ptr: *mut u8) { let ptr = ptr as *mut Key; - // Right before we run the user destructor be sure to flag the - // destructor as running for this thread so calls to `get` will return - // `None`. 
- (*ptr).dtor_running.set(true); - - // Some implementations may require us to move the value before we drop - // it as it could get re-initialized in-place during destruction. - // - // Hence, we use `ptr::read` on those platforms (to move to a "safe" - // location) instead of drop_in_place. - if requires_move_before_drop() { - ptr::read((*ptr).inner.get()); - } else { - ptr::drop_in_place((*ptr).inner.get()); - } + let tmp = ptr::read((*ptr).inner.get()); + // Set inner to None and set the state to Destroyed before we drop tmp + // so that a recursive call to get (called from the destructor) will + // be able to detect that the value is already being dropped. + ptr::write((*ptr).inner.get(), None); + *(*ptr).state.get() = LocalKeyState::Destroyed; + drop(tmp); } } #[doc(hidden)] pub mod os { - use cell::{Cell, UnsafeCell}; + use cell::UnsafeCell; use fmt; - use marker; - use ptr; use sys_common::thread_local::StaticKey as OsStaticKey; + use thread::LocalKeyState; pub struct Key { // OS-TLS key that we'll use to key off. os: OsStaticKey, - marker: marker::PhantomData>, + // The state of this key. os is only guaranteed to point to an + // allocated Value if this state is Valid. + state: UnsafeCell, + // Store a value here in addition to in the OsStaticKey itself so + // that if the OsStaticKey hasn't yet been allocated, we still have + // an Option that we can return a reference to. Unlike the real + // value, this value will always be None (because when the real + // value is initialized, we just use it directly). + dummy_value: UnsafeCell>, } impl fmt::Debug for Key { @@ -473,44 +570,68 @@ pub mod os { pub const fn new() -> Key { Key { os: OsStaticKey::new(Some(destroy_value::)), - marker: marker::PhantomData + state: UnsafeCell::new(LocalKeyState::Uninitialized), + dummy_value: UnsafeCell::new(None), } } - pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell>> { - let ptr = self.os.get() as *mut Value; - if !ptr.is_null() { - if ptr as usize == 1 { - return None + pub fn get(&self) -> &'static Option { + unsafe { + match *self.state.get() { + LocalKeyState::Valid => { + // Since the state is Valid, we know that os points to + // an allocated Value. + let ptr = self.os.get() as *mut Value; + debug_assert!(!ptr.is_null()); + &*(*ptr).value.get() + } + _ => { + // The dummy_value is guaranteed to always be None. This + // allows us to avoid allocating if the state isn't Valid. + // This is critical because it means that an allocator can + // call try_with and detect that the key is in state + // Initializing without recursing infinitely. + &*self.dummy_value.get() + } } - return Some(&(*ptr).value); } + } + + pub fn get_state(&self) -> LocalKeyState { + unsafe { *self.state.get() } + } - // If the lookup returned null, we haven't initialized our own - // local copy, so do that now. 
- let ptr: Box> = box Value { - key: self, - value: UnsafeCell::new(None), - }; - let ptr = Box::into_raw(ptr); - self.os.set(ptr as *mut u8); - Some(&(*ptr).value) + pub fn pre_init(&self) { + unsafe { + *self.state.get() = LocalKeyState::Initializing; + } + } + + pub fn rollback_init(&self) { + unsafe { + *self.state.get() = LocalKeyState::Uninitialized; + } + } + + pub fn post_init(&self, val: T) { + unsafe { + let ptr: Box> = box Value { + key: &*(self as *const _), + value: UnsafeCell::new(Some(val)), + }; + let ptr = Box::into_raw(ptr); + self.os.set(ptr as *mut u8); + *self.state.get() = LocalKeyState::Valid; + } } } unsafe extern fn destroy_value(ptr: *mut u8) { - // The OS TLS ensures that this key contains a NULL value when this - // destructor starts to run. We set it back to a sentinel value of 1 to - // ensure that any future calls to `get` for this thread will return - // `None`. - // - // Note that to prevent an infinite loop we reset it back to null right - // before we return from the destructor ourselves. let ptr = Box::from_raw(ptr as *mut Value); - let key = ptr.key; - key.os.set(1 as *mut u8); + // Set the state to Destroyed before we drop ptr so that any recursive + // calls to get can detect that the destructor is already being called. + *ptr.key.state.get() = LocalKeyState::Destroyed; drop(ptr); - key.os.set(ptr::null_mut()); } } @@ -561,7 +682,7 @@ mod tests { } } fn foo() -> Foo { - assert!(FOO.state() == LocalKeyState::Uninitialized); + assert!(FOO.state() == LocalKeyState::Initializing); Foo } thread_local!(static FOO: Foo = foo()); @@ -590,7 +711,49 @@ mod tests { } #[test] - fn circular() { + fn circular_init() { + struct S1; + struct S2; + thread_local!(static K1: S1 = S1::new()); + thread_local!(static K2: S2 = S2::new()); + static mut HITS: u32 = 0; + + impl S1 { + fn new() -> S1 { + unsafe { + HITS += 1; + if K2.state() == LocalKeyState::Initializing { + assert_eq!(HITS, 3); + } else { + if HITS == 1 { + K2.with(|_| {}); + } else { + assert_eq!(HITS, 3); + } + } + } + S1 + } + } + impl S2 { + fn new() -> S2 { + unsafe { + HITS += 1; + assert!(K1.state() != LocalKeyState::Initializing); + assert_eq!(HITS, 2); + K1.with(|_| {}); + } + S2 + } + } + + thread::spawn(move|| { + drop(S1::new()); + }).join().ok().unwrap(); + } + + #[test] + fn circular_drop() { struct S1; struct S2; thread_local!(static K1: UnsafeCell> = UnsafeCell::new(None)); @@ -630,7 +793,22 @@ mod tests { } #[test] - fn self_referential() { + fn self_referential_init() { + struct S1; + thread_local!(static K1: S1 = S1::new()); + + impl S1 { + fn new() -> S1 { + assert!(K1.state() == LocalKeyState::Initializing); + S1 + } + } + + thread::spawn(move|| K1.with(|_| {})).join().ok().unwrap(); + } + + #[test] + fn self_referential_drop() { struct S1; thread_local!(static K1: UnsafeCell> = UnsafeCell::new(None)); diff --git a/src/test/run-pass/tls-init-on-init.rs b/src/test/run-pass/tls-init-on-init.rs index b44c535d3a487..0514f3e3698fd 100644 --- a/src/test/run-pass/tls-init-on-init.rs +++ b/src/test/run-pass/tls-init-on-init.rs @@ -24,7 +24,7 @@ static CNT: AtomicUsize = ATOMIC_USIZE_INIT; impl Foo { fn init() -> Foo { let cnt = CNT.fetch_add(1, Ordering::SeqCst); - if cnt == 0 { + if FOO.state() == LocalKeyState::Uninitialized { FOO.with(|_| {}); } Foo { cnt: cnt } @@ -34,20 +34,16 @@ impl Foo { impl Drop for Foo { fn drop(&mut self) { if self.cnt == 1 { - FOO.with(|foo| assert_eq!(foo.cnt, 0)); + assert_eq!(FOO.state(), LocalKeyState::Destroyed); } else { assert_eq!(self.cnt, 0); - match 
FOO.state() { - LocalKeyState::Valid => panic!("should not be in valid state"), - LocalKeyState::Uninitialized | - LocalKeyState::Destroyed => {} - } + assert_eq!(FOO.state(), LocalKeyState::Valid); } } } fn main() { thread::spawn(|| { - FOO.with(|_| {}); + Foo::init(); }).join().unwrap(); } From 4998b232307d4445e35be225e8de3d8a38ee6450 Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Wed, 16 Aug 2017 15:12:36 -0400 Subject: [PATCH 2/4] std::thread::AccessError: Add methods to determine source of error --- src/libstd/thread/local.rs | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs index 558065aa48817..0de6648a424af 100644 --- a/src/libstd/thread/local.rs +++ b/src/libstd/thread/local.rs @@ -276,7 +276,33 @@ pub enum LocalKeyState { reason = "state querying was recently added", issue = "27716")] pub struct AccessError { - _private: (), + // whether the error was due to the key being in the Initializing + // state - false means the key was in the Destroyed state + init: bool, +} + +impl AccessError { + /// Determines whether the `AccessError` was due to the key being initialized. + /// + /// If `is_initializing` returns true, this `AccessError` was returned because + /// the key was in the `Initializing` state when it was accessed. + #[unstable(feature = "thread_local_state", + reason = "state querying was recently added", + issue = "27716")] + pub fn is_initializing(&self) -> bool { + self.init + } + + /// Determines whether the `AccessError` was due to the key being destroyed. + /// + /// If `is_destroyed` returns true, this `AccessError` was returned because + /// the key was in the `Destroyed` state when it was accessed. + #[unstable(feature = "thread_local_state", + reason = "state querying was recently added", + issue = "27716")] + pub fn is_destroyed(&self) -> bool { + !self.init + } } #[unstable(feature = "thread_local_state", @@ -293,7 +319,11 @@ impl fmt::Debug for AccessError { issue = "27716")] impl fmt::Display for AccessError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt("already destroyed", f) + if self.init { + fmt::Display::fmt("currently being initialized", f) + } else { + fmt::Display::fmt("already destroyed", f) + } } } @@ -429,8 +459,8 @@ impl LocalKey { // not to enter this else block in the recursive call. self.try_with(f) } - LocalKeyState::Initializing | - LocalKeyState::Destroyed => Err(AccessError { _private: ()}), + LocalKeyState::Initializing => Err(AccessError { init: true }), + LocalKeyState::Destroyed => Err(AccessError { init: false }), LocalKeyState::Valid => unreachable!(), } } From f201eedd687b184850104fd58ca21dc92740520b Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Wed, 16 Aug 2017 16:32:59 -0400 Subject: [PATCH 3/4] std::thread::LocalKeyState: Update compile and run tests --- src/libstd/thread/local.rs | 114 +++++++++++-------------- src/test/compile-fail/issue-43733-2.rs | 2 + src/test/compile-fail/issue-43733.rs | 32 +++++-- src/test/run-fail/tls-init-on-init.rs | 35 ++++++++ 4 files changed, 111 insertions(+), 72 deletions(-) create mode 100644 src/test/run-fail/tls-init-on-init.rs diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs index 0de6648a424af..e14bdd27ea1fa 100644 --- a/src/libstd/thread/local.rs +++ b/src/libstd/thread/local.rs @@ -192,7 +192,7 @@ macro_rules! 
__thread_local_inner { static __KEY: $crate::thread::__OsLocalKeyInner<$t> = $crate::thread::__OsLocalKeyInner::new(); - unsafe fn __getit() -> &'static Option<$t> { __KEY.get() } + unsafe fn __getit() -> &'static $crate::option::Option<$t> { __KEY.get() } unsafe fn __get_state() -> $crate::thread::LocalKeyState { __KEY.get_state() } @@ -504,39 +504,33 @@ pub mod fast { } } - pub fn get(&self) -> &'static Option { - unsafe { &*self.inner.get() } + pub unsafe fn get(&self) -> &'static Option { + &*self.inner.get() } - pub fn get_state(&self) -> LocalKeyState { - unsafe { *self.state.get() } + pub unsafe fn get_state(&self) -> LocalKeyState { + *self.state.get() } - pub fn pre_init(&self) { - unsafe { - // It's critical that we set the state to Initializing before - // registering destructors - if registering destructors causes - // allocation, and the global allocator uses TLS, then the - // allocator needs to be able to detect that the TLS is in - // the Initializing state and perform appropriate fallback - // logic rather than recursing infinitely. - *self.state.get() = LocalKeyState::Initializing; - self.register_dtor(); - } + pub unsafe fn pre_init(&self) { + // It's critical that we set the state to Initializing before + // registering destructors - if registering destructors causes + // allocation, and the global allocator uses TLS, then the + // allocator needs to be able to detect that the TLS is in + // the Initializing state and perform appropriate fallback + // logic rather than recursing infinitely. + *self.state.get() = LocalKeyState::Initializing; + self.register_dtor(); } - pub fn post_init(&self, val: T) { - unsafe { - *self.inner.get() = Some(val); - *self.state.get() = LocalKeyState::Valid; - } + pub unsafe fn post_init(&self, val: T) { + *self.inner.get() = Some(val); + *self.state.get() = LocalKeyState::Valid; } - pub fn rollback_init(&self) { - unsafe { - *self.inner.get() = None; - *self.state.get() = LocalKeyState::Uninitialized; - } + pub unsafe fn rollback_init(&self) { + *self.inner.get() = None; + *self.state.get() = LocalKeyState::Uninitialized; } unsafe fn register_dtor(&self) { @@ -605,54 +599,46 @@ pub mod os { } } - pub fn get(&self) -> &'static Option { - unsafe { - match *self.state.get() { - LocalKeyState::Valid => { - // Since the state is Valid, we know that os points to - // an allocated Value. - let ptr = self.os.get() as *mut Value; - debug_assert!(!ptr.is_null()); - &*(*ptr).value.get() - } - _ => { - // The dummy_value is guaranteed to always be None. This - // allows us to avoid allocating if the state isn't Valid. - // This is critical because it means that an allocator can - // call try_with and detect that the key is in state - // Initializing without recursing infinitely. - &*self.dummy_value.get() - } + pub unsafe fn get(&self) -> &'static Option { + match *self.state.get() { + LocalKeyState::Valid => { + // Since the state is Valid, we know that os points to + // an allocated Value. + let ptr = self.os.get() as *mut Value; + debug_assert!(!ptr.is_null()); + &*(*ptr).value.get() + } + _ => { + // The dummy_value is guaranteed to always be None. This + // allows us to avoid allocating if the state isn't Valid. + // This is critical because it means that an allocator can + // call try_with and detect that the key is in state + // Initializing without recursing infinitely. 
+ &*self.dummy_value.get() } } } - pub fn get_state(&self) -> LocalKeyState { - unsafe { *self.state.get() } + pub unsafe fn get_state(&self) -> LocalKeyState { + *self.state.get() } - pub fn pre_init(&self) { - unsafe { - *self.state.get() = LocalKeyState::Initializing; - } + pub unsafe fn pre_init(&self) { + *self.state.get() = LocalKeyState::Initializing; } - pub fn rollback_init(&self) { - unsafe { - *self.state.get() = LocalKeyState::Uninitialized; - } + pub unsafe fn rollback_init(&self) { + *self.state.get() = LocalKeyState::Uninitialized; } - pub fn post_init(&self, val: T) { - unsafe { - let ptr: Box> = box Value { - key: &*(self as *const _), - value: UnsafeCell::new(Some(val)), - }; - let ptr = Box::into_raw(ptr); - self.os.set(ptr as *mut u8); - *self.state.get() = LocalKeyState::Valid; - } + pub unsafe fn post_init(&self, val: T) { + let ptr: Box> = box Value { + key: &*(self as *const _), + value: UnsafeCell::new(Some(val)), + }; + let ptr = Box::into_raw(ptr); + self.os.set(ptr as *mut u8); + *self.state.get() = LocalKeyState::Valid; } } diff --git a/src/test/compile-fail/issue-43733-2.rs b/src/test/compile-fail/issue-43733-2.rs index 3dff34c2ebb12..4f1126ea8dcfe 100644 --- a/src/test/compile-fail/issue-43733-2.rs +++ b/src/test/compile-fail/issue-43733-2.rs @@ -16,6 +16,7 @@ #[cfg(not(target_thread_local))] struct Key { _data: std::cell::UnsafeCell>, + _state: std::cell::UnsafeCell, _flag: std::cell::Cell, } @@ -34,6 +35,7 @@ use std::thread::__FastLocalKeyInner as Key; static __KEY: Key<()> = Key::new(); //~^ ERROR `std::cell::UnsafeCell>: std::marker::Sync` is not satisfied +//~| ERROR `std::cell::UnsafeCell: std::marker::Sync` is not satisfied //~| ERROR `std::cell::Cell: std::marker::Sync` is not satisfied fn main() {} diff --git a/src/test/compile-fail/issue-43733.rs b/src/test/compile-fail/issue-43733.rs index a4aad21a9f832..6bef8a8fa1757 100644 --- a/src/test/compile-fail/issue-43733.rs +++ b/src/test/compile-fail/issue-43733.rs @@ -9,11 +9,10 @@ // except according to those terms. 
#![feature(const_fn, drop_types_in_const)] -#![feature(cfg_target_thread_local, thread_local_internals)] +#![feature(cfg_target_thread_local, thread_local_internals, thread_local_state)] type Foo = std::cell::RefCell; -#[cfg(target_thread_local)] static __KEY: std::thread::__FastLocalKeyInner = std::thread::__FastLocalKeyInner::new(); @@ -21,16 +20,33 @@ static __KEY: std::thread::__FastLocalKeyInner = static __KEY: std::thread::__OsLocalKeyInner = std::thread::__OsLocalKeyInner::new(); -fn __getit() -> std::option::Option< - &'static std::cell::UnsafeCell< - std::option::Option>> -{ +fn __getit() -> &'static std::option::Option { __KEY.get() //~ ERROR invocation of unsafe method requires unsafe } +fn __get_state() -> std::thread::LocalKeyState { + __KEY.get_state() //~ ERROR invocation of unsafe method requires unsafe +} + +fn __pre_init() { + __KEY.pre_init() //~ ERROR invocation of unsafe method requires unsafe +} + +fn __post_init(val: Foo) { + __KEY.post_init(val) //~ ERROR invocation of unsafe method requires unsafe +} + +fn __rollback_init() { + __KEY.rollback_init() //~ ERROR invocation of unsafe method requires unsafe +} + static FOO: std::thread::LocalKey = - std::thread::LocalKey::new(__getit, Default::default); -//~^ ERROR call to unsafe function requires unsafe + std::thread::LocalKey::new(__getit, //~ ERROR call to unsafe function requires unsafe + __get_state, + __pre_init, + Default::default, + __post_init, + __rollback_init); fn main() { FOO.with(|foo| println!("{}", foo.borrow())); diff --git a/src/test/run-fail/tls-init-on-init.rs b/src/test/run-fail/tls-init-on-init.rs new file mode 100644 index 0000000000000..1deef92b823bb --- /dev/null +++ b/src/test/run-fail/tls-init-on-init.rs @@ -0,0 +1,35 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// ignore-emscripten no threads support + +// Can't include entire panic message because it'd exceed tidy's 100-character line length limit. +// error-pattern:cannot access a TLS value while it is being initialized or during or after + +#![feature(thread_local_state)] + +use std::thread::{self, LocalKeyState}; + +struct Foo; + +thread_local!(static FOO: Foo = Foo::init()); + +impl Foo { + fn init() -> Foo { + FOO.with(|_| {}); + Foo + } +} + +fn main() { + thread::spawn(|| { + FOO.with(|_| {}); + }).join().unwrap(); +} From fe1894a4ca9c5f98d9648f633b518f789363e405 Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Wed, 6 Sep 2017 13:06:22 -0700 Subject: [PATCH 4/4] std::thread::LocalKeyState: Refactor to use fewer callbacks --- src/libstd/thread/local.rs | 313 ++++++++---------- src/libstd/thread/mod.rs | 2 + src/test/compile-fail/issue-43733-2.rs | 6 +- src/test/compile-fail/issue-43733.rs | 28 +- .../compile-fail/macro-local-data-key-priv.rs | 2 +- 5 files changed, 156 insertions(+), 195 deletions(-) diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs index e14bdd27ea1fa..426d0d39de9d4 100644 --- a/src/libstd/thread/local.rs +++ b/src/libstd/thread/local.rs @@ -13,6 +13,7 @@ #![unstable(feature = "thread_local_internals", issue = "0")] use fmt; +use cell::UnsafeCell; /// A thread local storage key which owns its contents. 
/// @@ -103,26 +104,11 @@ pub struct LocalKey { // Get a reference to the value. get can be called at any time during the // value's life cycle (Uninitialized, Initializing, Valid, and Destroyed). - // If it returns Some, then the key value is in the Valid state. Otherwise, - // if is in one of the other three states (use the get_state callback to - // check which one). - get: unsafe fn() -> &'static Option, - // Query the value's current state. - get_state: unsafe fn() -> LocalKeyState, - // Begin initialization, moving the value into the Initializing state, and - // performing any platform-specific initialization. - // - // After pre_init has been called, it must be safe to call rollback_init - // and then pre_init an arbitrary number of times - if init panics, we - // call rollback_init to move back to the Uninitialized state, and we - // may try to initialize again in a future call to with or try_with. - pre_init: unsafe fn(), - // Finalize initialization, using the provided value as the initial value - // for this key. Move the value into the Initialized state. - post_init: unsafe fn(T), - // Roll back a failed initialization (caused by init panicking). Move the - // value back into the Uninitialized state. - rollback_init: unsafe fn(), + get: unsafe fn() -> &'static UnsafeCell>, + + // Register a destructor for the value. register_dtor should be called + // after the key has been transitioned into the Valid state. + register_dtor: unsafe fn(), } #[stable(feature = "std_debug", since = "1.16.0")] @@ -181,8 +167,8 @@ macro_rules! thread_local { #[allow_internal_unstable] #[allow_internal_unsafe] macro_rules! __thread_local_inner { - ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => { - $(#[$attr])* $vis static $name: $crate::thread::LocalKey<$t> = { + (@key $(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => { + { #[thread_local] #[cfg(target_thread_local)] static __KEY: $crate::thread::__FastLocalKeyInner<$t> = @@ -192,30 +178,46 @@ macro_rules! __thread_local_inner { static __KEY: $crate::thread::__OsLocalKeyInner<$t> = $crate::thread::__OsLocalKeyInner::new(); - unsafe fn __getit() -> &'static $crate::option::Option<$t> { __KEY.get() } - - unsafe fn __get_state() -> $crate::thread::LocalKeyState { __KEY.get_state() } + unsafe fn __get() -> &'static $crate::cell::UnsafeCell<$crate::thread:: + __LocalKeyValue<$t>> { __KEY.get() } - unsafe fn __pre_init() { __KEY.pre_init() } + // Only the fast implementation needs a destructor explicitly + // registered - the OS implementation's destructor is registered + // automatically by std::sys_common::thread_local::StaticKey. 
+ #[cfg(target_thread_local)] + unsafe fn __register_dtor() { __KEY.register_dtor() } + #[cfg(not(target_thread_local))] + unsafe fn __register_dtor() {} + #[inline] fn __init() -> $t { $init } - unsafe fn __post_init(val: $t) { __KEY.post_init(val) } - - unsafe fn __rollback_init() { __KEY.rollback_init() } + unsafe { $crate::thread::LocalKey::new(__get, __register_dtor, __init) } + } + }; + ($(#[$attr:meta])* $vis:vis $name:ident, $t:ty, $init:expr) => { + #[cfg(stage0)] + $(#[$attr])* $vis static $name: $crate::thread::LocalKey<$t> = + __thread_local_inner!(@key $(#[$attr])* $vis $name, $t, $init); - unsafe { - $crate::thread::LocalKey::new(__getit, - __get_state, - __pre_init, - __init, - __post_init, - __rollback_init) - } - }; + #[cfg(not(stage0))] + $(#[$attr])* $vis const $name: $crate::thread::LocalKey<$t> = + __thread_local_inner!(@key $(#[$attr])* $vis $name, $t, $init); } } +#[unstable(feature = "thread_local_state", + reason = "state querying was recently added", + issue = "27716")] +#[doc(hidden)] +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub enum LocalKeyValue { + Uninitialized, + Initializing, + Valid(T), + Destroyed, +} + /// Indicator of the state of a thread local storage key. #[unstable(feature = "thread_local_state", reason = "state querying was recently added", @@ -245,6 +247,11 @@ pub enum LocalKeyState { /// Keys in the `Initializing` state will trigger a panic when accessed via /// [`with`]. /// + /// Note to allocator implementors: On some platforms, initializing a TLS + /// key causes allocation to happen _before_ the key is moved into the + /// `Initializing` state. Thus, it is not necessarily safe for a global + /// allocator to use this TLS mechanism. + /// /// [`try_with`]: ../../std/thread/struct.LocalKey.html#method.try_with /// [`with`]: ../../std/thread/struct.LocalKey.html#method.with Initializing, @@ -332,20 +339,12 @@ impl LocalKey { #[unstable(feature = "thread_local_internals", reason = "recently added to create a key", issue = "0")] - pub const unsafe fn new(get: unsafe fn() -> &'static Option, - get_state: unsafe fn() -> LocalKeyState, - pre_init: unsafe fn(), - init: fn() -> T, - post_init: unsafe fn(T), - rollback_init: unsafe fn()) - -> LocalKey { + pub const unsafe fn new(get: unsafe fn() -> &'static UnsafeCell>, + register_dtor: unsafe fn(), init: fn() -> T) -> LocalKey { LocalKey { - get, - get_state, - pre_init, init, - post_init, - rollback_init, + get, + register_dtor, } } @@ -366,38 +365,49 @@ impl LocalKey { or during or after it is destroyed") } - unsafe fn init(&self) { - struct RollbackOnPanic { + unsafe fn init(&self, slot: &UnsafeCell>) { + struct RollbackOnPanic<'a, T: 'a> { panicking: bool, - rollback: unsafe fn(), + slot: &'a UnsafeCell>, } - impl Drop for RollbackOnPanic { + impl<'a, T: 'a> Drop for RollbackOnPanic<'a, T> { fn drop(&mut self) { if self.panicking { - unsafe { (self.rollback)() } + unsafe { *self.slot.get() = LocalKeyValue::Uninitialized } } } } let mut reset = RollbackOnPanic { panicking: true, - rollback: self.rollback_init, + slot, }; - // Transition into Valid, perform any pre-init work (e.g., registering - // destructors). If self.init panics, the Drop method of RollbackOnPanic - // will call self.rollback, rolling back the state to Uninitialized. 
- // - // Note that once self.pre_init has been called once, it must be safe to call - // self.rollback_init and then self.pre_init an arbitrary number of times - if - // self.init panics, a future call to with or try_with will again try to - // initialize the value, causing this routine to be run. - (self.pre_init)(); + // Transition into Initializing in preparation for calling self.init. + *slot.get() = LocalKeyValue::Initializing; + // Call the initializer; if this panics, the Drop method of RollbackOnPanic + // will roll back to LocalKeyValue::Uninitialized. let val = (self.init)(); reset.panicking = false; - // Transition into Valid, set the value to val. - (self.post_init)(val); + // If we get here, self.init didn't panic, so move into state Valid and + // register the destructor. + *slot.get() = LocalKeyValue::Valid(val); + // NOTE(joshlf): Calling self.register_dtor here (after transitioning + // into Initializing and then into Valid) guarantees that, for the fast + // implementation, no allocation happens until after a key has transitioned + // into Initializing. This allows a global allocator to make use of this + // TLS implementation and still be able to detect a recusive call to alloc. + // + // Unfortunately, no such guarantee exists on platforms that cannot use + // the fast implementation - std::sys_common::thread_local::StaticKey's + // get and set methods both allocate under the hood, and the only way to + // get or modify thread-local data is to use those methods. Thus, we cannot + // guarantee that TLS is always safe to use in the implementation of global + // allocators. If somebody in the future figures out a way to postpone + // allocation until after the transition to Initializing, that would be + // great. + (self.register_dtor)(); } /// Query the current state of this key. @@ -429,7 +439,14 @@ impl LocalKey { reason = "state querying was recently added", issue = "27716")] pub fn state(&'static self) -> LocalKeyState { - unsafe { (self.get_state)() } + unsafe { + match &*(self.get)().get() { + &LocalKeyValue::Uninitialized => LocalKeyState::Uninitialized, + &LocalKeyValue::Initializing => LocalKeyState::Initializing, + &LocalKeyValue::Valid(_) => LocalKeyState::Valid, + &LocalKeyValue::Destroyed => LocalKeyState::Destroyed, + } + } } /// Acquires a reference to the value in this TLS key. @@ -449,19 +466,23 @@ impl LocalKey { pub fn try_with(&'static self, f: F) -> Result where F: FnOnce(&T) -> R { unsafe { - if let &Some(ref inner) = (self.get)() { + let slot = (self.get)(); + if let &LocalKeyValue::Valid(ref inner) = &*slot.get() { + // Do this in a separate if branch (rather than just part of the + // match statement in the else block) to increase the performance + // of the fast path. Ok(f(inner)) } else { - match self.state() { - LocalKeyState::Uninitialized => { - self.init(); + match *slot.get() { + LocalKeyValue::Uninitialized => { + self.init(slot); // Now that the value is initialized, we're guaranteed // not to enter this else block in the recursive call. 
self.try_with(f) } - LocalKeyState::Initializing => Err(AccessError { init: true }), - LocalKeyState::Destroyed => Err(AccessError { init: false }), - LocalKeyState::Valid => unreachable!(), + LocalKeyValue::Initializing => Err(AccessError { init: true }), + LocalKeyValue::Destroyed => Err(AccessError { init: false }), + LocalKeyValue::Valid(_) => unreachable!(), } } } @@ -474,18 +495,15 @@ pub mod fast { use cell::{Cell, UnsafeCell}; use fmt; use mem; - use ptr; use sys::fast_thread_local::register_dtor; - use thread::LocalKeyState; + use thread::__LocalKeyValue; pub struct Key { - inner: UnsafeCell>, - state: UnsafeCell, + inner: UnsafeCell<__LocalKeyValue>, - // Keep track of whether the destructor has been registered (this is - // not the same thing as not being in the Uninitialized state - we - // can transition back into that state in rollback_init). Remember - // that this variable is thread-local, not global. + // Keep track of whether the destructor has been registered (it's + // registered by LocalKey::init after the initializer successfully + // returns). Remember that this variable is thread-local, not global. dtor_registered: Cell, } @@ -498,42 +516,16 @@ pub mod fast { impl Key { pub const fn new() -> Key { Key { - inner: UnsafeCell::new(None), - state: UnsafeCell::new(LocalKeyState::Uninitialized), + inner: UnsafeCell::new(__LocalKeyValue::Uninitialized), dtor_registered: Cell::new(false), } } - pub unsafe fn get(&self) -> &'static Option { - &*self.inner.get() + pub unsafe fn get(&self) -> &'static UnsafeCell<__LocalKeyValue> { + &*(&self.inner as *const _) } - pub unsafe fn get_state(&self) -> LocalKeyState { - *self.state.get() - } - - pub unsafe fn pre_init(&self) { - // It's critical that we set the state to Initializing before - // registering destructors - if registering destructors causes - // allocation, and the global allocator uses TLS, then the - // allocator needs to be able to detect that the TLS is in - // the Initializing state and perform appropriate fallback - // logic rather than recursing infinitely. - *self.state.get() = LocalKeyState::Initializing; - self.register_dtor(); - } - - pub unsafe fn post_init(&self, val: T) { - *self.inner.get() = Some(val); - *self.state.get() = LocalKeyState::Valid; - } - - pub unsafe fn rollback_init(&self) { - *self.inner.get() = None; - *self.state.get() = LocalKeyState::Uninitialized; - } - - unsafe fn register_dtor(&self) { + pub unsafe fn register_dtor(&self) { if !mem::needs_drop::() || self.dtor_registered.get() { return } @@ -546,12 +538,10 @@ pub mod fast { unsafe extern fn destroy_value(ptr: *mut u8) { let ptr = ptr as *mut Key; - let tmp = ptr::read((*ptr).inner.get()); - // Set inner to None and set the state to Destroyed before we drop tmp - // so that a recursive call to get (called from the destructor) will + // Set inner to Destroyed before we drop tmp so that a + // recursive call to get (called from the destructor) will // be able to detect that the value is already being dropped. - ptr::write((*ptr).inner.get(), None); - *(*ptr).state.get() = LocalKeyState::Destroyed; + let tmp = mem::replace(&mut *(*ptr).inner.get(), __LocalKeyValue::Destroyed); drop(tmp); } } @@ -560,21 +550,17 @@ pub mod fast { pub mod os { use cell::UnsafeCell; use fmt; + use ptr; use sys_common::thread_local::StaticKey as OsStaticKey; - use thread::LocalKeyState; + use thread::__LocalKeyValue; pub struct Key { // OS-TLS key that we'll use to key off. os: OsStaticKey, - // The state of this key. 
os is only guaranteed to point to an - // allocated Value if this state is Valid. - state: UnsafeCell, - // Store a value here in addition to in the OsStaticKey itself so - // that if the OsStaticKey hasn't yet been allocated, we still have - // an Option that we can return a reference to. Unlike the real - // value, this value will always be None (because when the real - // value is initialized, we just use it directly). - dummy_value: UnsafeCell>, + + // Dummy value that can be used once the Value has already been + // deallocated. Its value is always Destroyed. + dummy_destroyed: UnsafeCell<__LocalKeyValue>, } impl fmt::Debug for Key { @@ -587,67 +573,56 @@ pub mod os { struct Value { key: &'static Key, - value: UnsafeCell>, + value: UnsafeCell<__LocalKeyValue>, } impl Key { pub const fn new() -> Key { Key { os: OsStaticKey::new(Some(destroy_value::)), - state: UnsafeCell::new(LocalKeyState::Uninitialized), - dummy_value: UnsafeCell::new(None), + dummy_destroyed: UnsafeCell::new(__LocalKeyValue::Destroyed), } } - pub unsafe fn get(&self) -> &'static Option { - match *self.state.get() { - LocalKeyState::Valid => { - // Since the state is Valid, we know that os points to - // an allocated Value. - let ptr = self.os.get() as *mut Value; - debug_assert!(!ptr.is_null()); - &*(*ptr).value.get() - } - _ => { - // The dummy_value is guaranteed to always be None. This - // allows us to avoid allocating if the state isn't Valid. - // This is critical because it means that an allocator can - // call try_with and detect that the key is in state - // Initializing without recursing infinitely. - &*self.dummy_value.get() + pub unsafe fn get(&'static self) -> &'static UnsafeCell<__LocalKeyValue> { + let ptr = self.os.get() as *mut Value; + if !ptr.is_null() { + if ptr as usize == 1 { + // The destructor was already called (and set self.os to 1). + return &self.dummy_destroyed; } + return &(*ptr).value; } - } - - pub unsafe fn get_state(&self) -> LocalKeyState { - *self.state.get() - } - - pub unsafe fn pre_init(&self) { - *self.state.get() = LocalKeyState::Initializing; - } - - pub unsafe fn rollback_init(&self) { - *self.state.get() = LocalKeyState::Uninitialized; - } - - pub unsafe fn post_init(&self, val: T) { + // If the lookup returned null, we haven't initialized our own + // local copy, so do that now. let ptr: Box> = box Value { - key: &*(self as *const _), - value: UnsafeCell::new(Some(val)), - }; + key: &*(self as *const _), + value: UnsafeCell::new(__LocalKeyValue::Uninitialized), + }; let ptr = Box::into_raw(ptr); self.os.set(ptr as *mut u8); - *self.state.get() = LocalKeyState::Valid; + &(*ptr).value } } unsafe extern fn destroy_value(ptr: *mut u8) { + // The OS TLS ensures that this key contains a NULL value when this + // destructor starts to run. We set it back to a sentinel value of 1 to + // ensure that any future calls to `get` for this thread will return + // `None`. + // + // Note that to prevent an infinite loop we reset it back to null right + // before we return from the destructor ourselves. + // + // FIXME: Setting this back to null means that, after this destructor + // returns, future accesses of this key will think that the state is + // Uninitialized (rather than Destroyed). We should figure out a way + // to fix this. let ptr = Box::from_raw(ptr as *mut Value); - // Set the state to Destroyed before we drop ptr so that any recursive - // calls to get can detect that the destructor is already being called. 
- *ptr.key.state.get() = LocalKeyState::Destroyed; + let key = ptr.key; + key.os.set(1 as *mut u8); drop(ptr); + key.os.set(ptr::null_mut()); } } diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs index 4912ff93abdb3..638557c305afe 100644 --- a/src/libstd/thread/mod.rs +++ b/src/libstd/thread/mod.rs @@ -196,6 +196,8 @@ pub use self::local::{LocalKey, LocalKeyState, AccessError}; // where fast TLS was not available; end-user code is compiled with fast TLS // where available, but both are needed. +#[unstable(feature = "libstd_thread_internals", issue = "0")] +#[doc(hidden)] pub use self::local::LocalKeyValue as __LocalKeyValue; #[unstable(feature = "libstd_thread_internals", issue = "0")] #[cfg(target_thread_local)] #[doc(hidden)] pub use self::local::fast::Key as __FastLocalKeyInner; diff --git a/src/test/compile-fail/issue-43733-2.rs b/src/test/compile-fail/issue-43733-2.rs index 4f1126ea8dcfe..85415dd4d2017 100644 --- a/src/test/compile-fail/issue-43733-2.rs +++ b/src/test/compile-fail/issue-43733-2.rs @@ -15,8 +15,7 @@ // a custom non-`Sync` type to fake the same error. #[cfg(not(target_thread_local))] struct Key { - _data: std::cell::UnsafeCell>, - _state: std::cell::UnsafeCell, + _data: std::cell::UnsafeCell, _flag: std::cell::Cell, } @@ -34,8 +33,7 @@ impl Key { use std::thread::__FastLocalKeyInner as Key; static __KEY: Key<()> = Key::new(); -//~^ ERROR `std::cell::UnsafeCell>: std::marker::Sync` is not satisfied -//~| ERROR `std::cell::UnsafeCell: std::marker::Sync` is not satisfied +//~^ ERROR `std::cell::UnsafeCell>: std::marker::Sync` is not //~| ERROR `std::cell::Cell: std::marker::Sync` is not satisfied fn main() {} diff --git a/src/test/compile-fail/issue-43733.rs b/src/test/compile-fail/issue-43733.rs index 6bef8a8fa1757..dbcc6a39d524f 100644 --- a/src/test/compile-fail/issue-43733.rs +++ b/src/test/compile-fail/issue-43733.rs @@ -20,33 +20,19 @@ static __KEY: std::thread::__FastLocalKeyInner = static __KEY: std::thread::__OsLocalKeyInner = std::thread::__OsLocalKeyInner::new(); -fn __getit() -> &'static std::option::Option { +fn __get() -> &'static std::cell::UnsafeCell> { __KEY.get() //~ ERROR invocation of unsafe method requires unsafe } -fn __get_state() -> std::thread::LocalKeyState { - __KEY.get_state() //~ ERROR invocation of unsafe method requires unsafe -} - -fn __pre_init() { - __KEY.pre_init() //~ ERROR invocation of unsafe method requires unsafe -} - -fn __post_init(val: Foo) { - __KEY.post_init(val) //~ ERROR invocation of unsafe method requires unsafe -} - -fn __rollback_init() { - __KEY.rollback_init() //~ ERROR invocation of unsafe method requires unsafe +fn __register_dtor() { + #[cfg(target_thread_local)] + __KEY.register_dtor() //~ ERROR invocation of unsafe method requires unsafe } static FOO: std::thread::LocalKey = - std::thread::LocalKey::new(__getit, //~ ERROR call to unsafe function requires unsafe - __get_state, - __pre_init, - Default::default, - __post_init, - __rollback_init); + std::thread::LocalKey::new(__get, //~ ERROR call to unsafe function requires unsafe + __register_dtor, + Default::default); fn main() { FOO.with(|foo| println!("{}", foo.borrow())); diff --git a/src/test/compile-fail/macro-local-data-key-priv.rs b/src/test/compile-fail/macro-local-data-key-priv.rs index 3818c8f7754a6..3ae629cd89529 100644 --- a/src/test/compile-fail/macro-local-data-key-priv.rs +++ b/src/test/compile-fail/macro-local-data-key-priv.rs @@ -16,5 +16,5 @@ mod bar { fn main() { bar::baz.with(|_| ()); - //~^ ERROR static `baz` is private + //~^ 
ERROR `baz` is private }
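
---

Below is a brief usage sketch (not part of the patches above) showing how downstream code could combine the APIs this series introduces: `LocalKey::state`, the new `Initializing` state, and `try_with` together with `AccessError::is_initializing`/`is_destroyed` from patch 2. The `LOG` key and the `emit` function are hypothetical names used only for illustration; the behavior assumed here is the one described in the doc comments added by patches 1 and 2.

#![feature(thread_local_state)]

use std::cell::RefCell;
use std::thread::LocalKeyState;

// Hypothetical thread-local log buffer, used only for this illustration.
thread_local!(static LOG: RefCell<Vec<String>> = RefCell::new(Vec::new()));

fn emit(msg: &str) {
    // Unlike `with`, `try_with` does not panic if LOG is currently being
    // initialized (e.g. if `emit` is reached from inside the initializer)
    // or has already been destroyed; it reports which case occurred instead.
    match LOG.try_with(|log| log.borrow_mut().push(msg.to_string())) {
        Ok(()) => {}
        Err(e) if e.is_initializing() => println!("(initializing) {}", msg),
        Err(e) => {
            debug_assert!(e.is_destroyed());
            println!("(destroyed) {}", msg);
        }
    }
}

fn main() {
    // Keys start out Uninitialized on every thread.
    assert_eq!(LOG.state(), LocalKeyState::Uninitialized);
    emit("first message"); // triggers lazy initialization via try_with
    assert_eq!(LOG.state(), LocalKeyState::Valid);
}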