
Commit 3c805ce

Committed Jun 20, 2019
Auto merge of #60341 - mtak-:macos-tlv-workaround, r=alexcrichton
macos tlv workaround

Fixes: #60141

Includes:

* Remove dead code: `requires_move_before_drop`. This hasn't been needed for a while now (oops, I should have removed it in #57655).
* redox had a copy of `fast::Key` (not sure why?). That has been removed.
* Perform a `read_volatile` on OSX to reduce `tlv_get_addr` calls per `__getit` from 4-2 (depending on context) to 1.

`tlv_get_addr` is relatively expensive (~1.5ns on my machine). Previously, in contexts where `__getit` was inlined, 4 calls to `tlv_get_addr` were performed per lookup. For some reason, when `__getit` is not inlined this is reduced to 2x, and performance improves to match. After this PR, I have only ever seen 1 call to `tlv_get_addr` per `__getit`, and macOS now benefits from situations where `__getit` is inlined.

I'm not sure if the `read_volatile(&&__KEY)` trick is working around an LLVM bug, a rustc bug, or neither.

r? @alexcrichton
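
As a rough illustration of the `read_volatile(&&__KEY)` idea mentioned above: a volatile load is neither elided nor duplicated by the compiler, so the address that flows out of it is materialized exactly once per call. A minimal sketch using an ordinary `static` and a hypothetical `getit` function, not the actual macro internals:

use std::ptr;

static KEY: u32 = 0;

// Load the reference through a volatile read; the optimizer must keep this
// load and cannot re-derive the address behind the returned reference later.
unsafe fn getit() -> &'static u32 {
    ptr::read_volatile(&&KEY)
}

fn main() {
    assert_eq!(unsafe { *getit() }, 0);
}
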
2 parents: 7d10761 + b148c25

File tree

7 files changed (+196, −237 lines)

src/libstd/sys/redox/fast_thread_local.rs

Lines changed: 1 addition & 108 deletions

@@ -1,111 +1,4 @@
 #![cfg(target_thread_local)]
 #![unstable(feature = "thread_local_internals", issue = "0")]
 
-use crate::cell::{Cell, UnsafeCell};
-use crate::mem;
-use crate::ptr;
-
-
-pub struct Key<T> {
-    inner: UnsafeCell<Option<T>>,
-
-    // Metadata to keep track of the state of the destructor. Remember that
-    // these variables are thread-local, not global.
-    dtor_registered: Cell<bool>,
-    dtor_running: Cell<bool>,
-}
-
-unsafe impl<T> Sync for Key<T> { }
-
-impl<T> Key<T> {
-    pub const fn new() -> Key<T> {
-        Key {
-            inner: UnsafeCell::new(None),
-            dtor_registered: Cell::new(false),
-            dtor_running: Cell::new(false)
-        }
-    }
-
-    pub fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
-        unsafe {
-            if mem::needs_drop::<T>() && self.dtor_running.get() {
-                return None
-            }
-            self.register_dtor();
-        }
-        Some(&self.inner)
-    }
-
-    unsafe fn register_dtor(&self) {
-        if !mem::needs_drop::<T>() || self.dtor_registered.get() {
-            return
-        }
-
-        register_dtor(self as *const _ as *mut u8,
-                      destroy_value::<T>);
-        self.dtor_registered.set(true);
-    }
-}
-
-pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
-    // The fallback implementation uses a vanilla OS-based TLS key to track
-    // the list of destructors that need to be run for this thread. The key
-    // then has its own destructor which runs all the other destructors.
-    //
-    // The destructor for DTORS is a little special in that it has a `while`
-    // loop to continuously drain the list of registered destructors. It
-    // *should* be the case that this loop always terminates because we
-    // provide the guarantee that a TLS key cannot be set after it is
-    // flagged for destruction.
-    use crate::sys_common::thread_local as os;
-
-    static DTORS: os::StaticKey = os::StaticKey::new(Some(run_dtors));
-    type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>;
-    if DTORS.get().is_null() {
-        let v: Box<List> = box Vec::new();
-        DTORS.set(Box::into_raw(v) as *mut u8);
-    }
-    let list: &mut List = &mut *(DTORS.get() as *mut List);
-    list.push((t, dtor));
-
-    unsafe extern fn run_dtors(mut ptr: *mut u8) {
-        while !ptr.is_null() {
-            let list: Box<List> = Box::from_raw(ptr as *mut List);
-            for (ptr, dtor) in list.into_iter() {
-                dtor(ptr);
-            }
-            ptr = DTORS.get();
-            DTORS.set(ptr::null_mut());
-        }
-    }
-}
-
-pub unsafe extern fn destroy_value<T>(ptr: *mut u8) {
-    let ptr = ptr as *mut Key<T>;
-    // Right before we run the user destructor be sure to flag the
-    // destructor as running for this thread so calls to `get` will return
-    // `None`.
-    (*ptr).dtor_running.set(true);
-
-    // The macOS implementation of TLS apparently had an odd aspect to it
-    // where the pointer we have may be overwritten while this destructor
-    // is running. Specifically if a TLS destructor re-accesses TLS it may
-    // trigger a re-initialization of all TLS variables, paving over at
-    // least some destroyed ones with initial values.
-    //
-    // This means that if we drop a TLS value in place on macOS that we could
-    // revert the value to its original state halfway through the
-    // destructor, which would be bad!
-    //
-    // Hence, we use `ptr::read` on macOS (to move to a "safe" location)
-    // instead of drop_in_place.
-    if cfg!(target_os = "macos") {
-        ptr::read((*ptr).inner.get());
-    } else {
-        ptr::drop_in_place((*ptr).inner.get());
-    }
-}
-
-pub fn requires_move_before_drop() -> bool {
-    false
-}
+pub use crate::sys_common::thread_local::register_dtor_fallback as register_dtor;
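
The long comment deleted above describes the fallback scheme that the remaining one-liner now re-exports (`register_dtor_fallback` in `sys_common::thread_local`): a single OS TLS key holds a list of `(ptr, dtor)` pairs, and that key's own destructor drains the list until a pass registers nothing new. A standalone sketch of that drain loop, with a plain `Vec` standing in for the OS-keyed list (hypothetical helper, not the code above):

type Dtor = unsafe fn(*mut u8);

// Keep draining the queue; in the real implementation a running destructor
// can re-register entries through the TLS key, so the outer loop re-checks
// until nothing is left.
unsafe fn run_dtors(queue: &mut Vec<(*mut u8, Dtor)>) {
    while !queue.is_empty() {
        for (ptr, dtor) in std::mem::take(queue) {
            dtor(ptr);
        }
    }
}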

src/libstd/sys/unix/fast_thread_local.rs

Lines changed: 0 additions & 4 deletions

@@ -82,7 +82,3 @@ pub unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) {
         }
     }
 }
-
-pub fn requires_move_before_drop() -> bool {
-    false
-}

src/libstd/sys/windows/fast_thread_local.rs

Lines changed: 0 additions & 4 deletions

@@ -2,7 +2,3 @@
 #![cfg(target_thread_local)]
 
 pub use crate::sys_common::thread_local::register_dtor_fallback as register_dtor;
-
-pub fn requires_move_before_drop() -> bool {
-    false
-}

src/libstd/thread/local.rs

Lines changed: 183 additions & 107 deletions

@@ -2,10 +2,7 @@
 
 #![unstable(feature = "thread_local_internals", issue = "0")]
 
-use crate::cell::UnsafeCell;
 use crate::fmt;
-use crate::hint;
-use crate::mem;
 
 /// A thread local storage key which owns its contents.
 ///
@@ -92,10 +89,7 @@ pub struct LocalKey<T: 'static> {
     // trivially devirtualizable by LLVM because the value of `inner` never
     // changes and the constant should be readonly within a crate. This mainly
     // only runs into problems when TLS statics are exported across crates.
-    inner: unsafe fn() -> Option<&'static UnsafeCell<Option<T>>>,
-
-    // initialization routine to invoke to create a value
-    init: fn() -> T,
+    inner: unsafe fn() -> Option<&'static T>,
 }
 
 #[stable(feature = "std_debug", since = "1.16.0")]
@@ -159,10 +153,7 @@ macro_rules! __thread_local_inner {
            #[inline]
            fn __init() -> $t { $init }
 
-           unsafe fn __getit() -> $crate::option::Option<
-               &'static $crate::cell::UnsafeCell<
-                   $crate::option::Option<$t>>>
-           {
+           unsafe fn __getit() -> $crate::option::Option<&'static $t> {
                #[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
                static __KEY: $crate::thread::__StaticLocalKeyInner<$t> =
                    $crate::thread::__StaticLocalKeyInner::new();
@@ -182,11 +173,11 @@ macro_rules! __thread_local_inner {
                static __KEY: $crate::thread::__OsLocalKeyInner<$t> =
                    $crate::thread::__OsLocalKeyInner::new();
 
-               __KEY.get()
+               __KEY.get(__init)
            }
 
            unsafe {
-               $crate::thread::LocalKey::new(__getit, __init)
+               $crate::thread::LocalKey::new(__getit)
            }
        }
    };
@@ -221,11 +212,9 @@ impl<T: 'static> LocalKey<T> {
     #[unstable(feature = "thread_local_internals",
                reason = "recently added to create a key",
                issue = "0")]
-    pub const unsafe fn new(inner: unsafe fn() -> Option<&'static UnsafeCell<Option<T>>>,
-                            init: fn() -> T) -> LocalKey<T> {
+    pub const unsafe fn new(inner: unsafe fn() -> Option<&'static T>) -> LocalKey<T> {
         LocalKey {
             inner,
-            init,
         }
     }
 
@@ -246,37 +235,6 @@ impl<T: 'static> LocalKey<T> {
                                  after it is destroyed")
     }
 
-    unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T {
-        // Execute the initialization up front, *then* move it into our slot,
-        // just in case initialization fails.
-        let value = (self.init)();
-        let ptr = slot.get();
-
-        // note that this can in theory just be `*ptr = Some(value)`, but due to
-        // the compiler will currently codegen that pattern with something like:
-        //
-        //      ptr::drop_in_place(ptr)
-        //      ptr::write(ptr, Some(value))
-        //
-        // Due to this pattern it's possible for the destructor of the value in
-        // `ptr` (e.g., if this is being recursively initialized) to re-access
-        // TLS, in which case there will be a `&` and `&mut` pointer to the same
-        // value (an aliasing violation). To avoid setting the "I'm running a
-        // destructor" flag we just use `mem::replace` which should sequence the
-        // operations a little differently and make this safe to call.
-        mem::replace(&mut *ptr, Some(value));
-
-        // After storing `Some` we want to get a reference to the contents of
-        // what we just stored. While we could use `unwrap` here and it should
-        // always work it empirically doesn't seem to always get optimized away,
-        // which means that using something like `try_with` can pull in
-        // panicking code and cause a large size bloat.
-        match *ptr {
-            Some(ref x) => x,
-            None => hint::unreachable_unchecked(),
-        }
-    }
-
     /// Acquires a reference to the value in this TLS key.
     ///
     /// This will lazily initialize the value if this thread has not referenced
@@ -293,13 +251,68 @@ impl<T: 'static> LocalKey<T> {
         F: FnOnce(&T) -> R,
     {
         unsafe {
-            let slot = (self.inner)().ok_or(AccessError {
+            let thread_local = (self.inner)().ok_or(AccessError {
                 _private: (),
            })?;
-            Ok(f(match *slot.get() {
-                Some(ref inner) => inner,
-                None => self.init(slot),
-            }))
+            Ok(f(thread_local))
+        }
+    }
+}
+
+mod lazy {
+    use crate::cell::UnsafeCell;
+    use crate::mem;
+    use crate::hint;
+
+    pub struct LazyKeyInner<T> {
+        inner: UnsafeCell<Option<T>>,
+    }
+
+    impl<T> LazyKeyInner<T> {
+        pub const fn new() -> LazyKeyInner<T> {
+            LazyKeyInner {
+                inner: UnsafeCell::new(None),
+            }
+        }
+
+        pub unsafe fn get(&self) -> Option<&'static T> {
+            (*self.inner.get()).as_ref()
+        }
+
+        pub unsafe fn initialize<F: FnOnce() -> T>(&self, init: F) -> &'static T {
+            // Execute the initialization up front, *then* move it into our slot,
+            // just in case initialization fails.
+            let value = init();
+            let ptr = self.inner.get();
+
+            // note that this can in theory just be `*ptr = Some(value)`, but due to
+            // the compiler will currently codegen that pattern with something like:
+            //
+            //      ptr::drop_in_place(ptr)
+            //      ptr::write(ptr, Some(value))
+            //
+            // Due to this pattern it's possible for the destructor of the value in
+            // `ptr` (e.g., if this is being recursively initialized) to re-access
+            // TLS, in which case there will be a `&` and `&mut` pointer to the same
+            // value (an aliasing violation). To avoid setting the "I'm running a
+            // destructor" flag we just use `mem::replace` which should sequence the
+            // operations a little differently and make this safe to call.
+            mem::replace(&mut *ptr, Some(value));
+
+            // After storing `Some` we want to get a reference to the contents of
+            // what we just stored. While we could use `unwrap` here and it should
+            // always work it empirically doesn't seem to always get optimized away,
+            // which means that using something like `try_with` can pull in
+            // panicking code and cause a large size bloat.
+            match *ptr {
+                Some(ref x) => x,
+                None => hint::unreachable_unchecked(),
+            }
+        }
+
+        #[allow(unused)]
+        pub unsafe fn take(&mut self) -> Option<T> {
+            (*self.inner.get()).take()
         }
     }
 }
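
The comment carried over into `initialize` above is the heart of it: a plain `*ptr = Some(value)` drops the old contents in place before writing the new value, so a destructor that re-enters the same slot can observe it mid-assignment, while `mem::replace` moves the old value out first and lets it drop only after the slot already holds the new one. A standalone sketch of that ordering (hypothetical `store` helper, not part of the diff):

use std::mem;

// The slot is updated before the old value is dropped, so any code that runs
// during `drop(old)` sees a fully written slot instead of a half-dead value.
fn store<T>(slot: &mut Option<T>, value: T) {
    let old = mem::replace(slot, Some(value));
    drop(old);
}

fn main() {
    let mut slot = Some(String::from("old"));
    store(&mut slot, String::from("new"));
    assert_eq!(slot.as_deref(), Some("new"));
}
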
@@ -309,11 +322,11 @@ impl<T: 'static> LocalKey<T> {
 #[doc(hidden)]
 #[cfg(all(target_arch = "wasm32", not(target_feature = "atomics")))]
 pub mod statik {
-    use crate::cell::UnsafeCell;
+    use super::lazy::LazyKeyInner;
     use crate::fmt;
 
     pub struct Key<T> {
-        inner: UnsafeCell<Option<T>>,
+        inner: LazyKeyInner<T>,
     }
 
     unsafe impl<T> Sync for Key<T> { }
@@ -327,32 +340,55 @@ pub mod statik {
     impl<T> Key<T> {
         pub const fn new() -> Key<T> {
             Key {
-                inner: UnsafeCell::new(None),
+                inner: LazyKeyInner::new(),
             }
         }
 
-        pub unsafe fn get(&self) -> Option<&'static UnsafeCell<Option<T>>> {
-            Some(&*(&self.inner as *const _))
+        pub unsafe fn get(&self, init: fn() -> T) -> Option<&'static T> {
+            let value = match self.inner.get() {
+                Some(ref value) => value,
+                None => self.inner.initialize(init),
+            };
+            Some(value)
         }
     }
 }
 
 #[doc(hidden)]
 #[cfg(target_thread_local)]
 pub mod fast {
-    use crate::cell::{Cell, UnsafeCell};
+    use super::lazy::LazyKeyInner;
+    use crate::cell::Cell;
     use crate::fmt;
     use crate::mem;
-    use crate::ptr;
-    use crate::sys::fast_thread_local::{register_dtor, requires_move_before_drop};
+    use crate::sys::fast_thread_local::register_dtor;
 
+    #[derive(Copy, Clone)]
+    enum DtorState {
+        Unregistered,
+        Registered,
+        RunningOrHasRun,
+    }
+
+    // This data structure has been carefully constructed so that the fast path
+    // only contains one branch on x86. That optimization is necessary to avoid
+    // duplicated tls lookups on OSX.
+    //
+    // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
     pub struct Key<T> {
-        inner: UnsafeCell<Option<T>>,
+        // If `LazyKeyInner::get` returns `None`, that indicates either:
+        // * The value has never been initialized
+        // * The value is being recursively initialized
+        // * The value has already been destroyed or is being destroyed
        // To determine which kind of `None`, check `dtor_state`.
+        //
+        // This is very optimizer friendly for the fast path - initialized but
+        // not yet dropped.
+        inner: LazyKeyInner<T>,
 
         // Metadata to keep track of the state of the destructor. Remember that
-        // these variables are thread-local, not global.
-        dtor_registered: Cell<bool>,
-        dtor_running: Cell<bool>,
+        // this variable is thread-local, not global.
+        dtor_state: Cell<DtorState>,
     }
 
     impl<T> fmt::Debug for Key<T> {
@@ -364,54 +400,75 @@ pub mod fast {
     impl<T> Key<T> {
         pub const fn new() -> Key<T> {
             Key {
-                inner: UnsafeCell::new(None),
-                dtor_registered: Cell::new(false),
-                dtor_running: Cell::new(false)
+                inner: LazyKeyInner::new(),
+                dtor_state: Cell::new(DtorState::Unregistered),
             }
         }
 
-        pub unsafe fn get(&self) -> Option<&'static UnsafeCell<Option<T>>> {
-            if mem::needs_drop::<T>() && self.dtor_running.get() {
-                return None
+        pub unsafe fn get<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+            match self.inner.get() {
+                Some(val) => Some(val),
+                None => self.try_initialize(init),
             }
-            self.register_dtor();
-            Some(&*(&self.inner as *const _))
         }
 
-        unsafe fn register_dtor(&self) {
-            if !mem::needs_drop::<T>() || self.dtor_registered.get() {
-                return
+        // `try_initialize` is only called once per fast thread local variable,
+        // except in corner cases where thread_local dtors reference other
+        // thread_local's, or it is being recursively initialized.
+        //
+        // Macos: Inlining this function can cause two `tlv_get_addr` calls to
+        // be performed for every call to `Key::get`. The #[cold] hint makes
+        // that less likely.
+        // LLVM issue: https://bugs.llvm.org/show_bug.cgi?id=41722
+        #[cold]
+        unsafe fn try_initialize<F: FnOnce() -> T>(&self, init: F) -> Option<&'static T> {
+            if !mem::needs_drop::<T>() || self.try_register_dtor() {
+                Some(self.inner.initialize(init))
+            } else {
+                None
            }
+        }
 
-            register_dtor(self as *const _ as *mut u8,
-                          destroy_value::<T>);
-            self.dtor_registered.set(true);
+        // `try_register_dtor` is only called once per fast thread local
+        // variable, except in corner cases where thread_local dtors reference
+        // other thread_local's, or it is being recursively initialized.
+        unsafe fn try_register_dtor(&self) -> bool {
+            match self.dtor_state.get() {
+                DtorState::Unregistered => {
+                    // dtor registration happens before initialization.
+                    register_dtor(self as *const _ as *mut u8,
+                                  destroy_value::<T>);
+                    self.dtor_state.set(DtorState::Registered);
+                    true
+                }
+                DtorState::Registered => {
+                    // recursively initialized
+                    true
+                }
+                DtorState::RunningOrHasRun => {
+                    false
+                }
+            }
         }
     }
 
     unsafe extern fn destroy_value<T>(ptr: *mut u8) {
         let ptr = ptr as *mut Key<T>;
-        // Right before we run the user destructor be sure to flag the
-        // destructor as running for this thread so calls to `get` will return
-        // `None`.
-        (*ptr).dtor_running.set(true);
 
-        // Some implementations may require us to move the value before we drop
-        // it as it could get re-initialized in-place during destruction.
-        //
-        // Hence, we use `ptr::read` on those platforms (to move to a "safe"
-        // location) instead of drop_in_place.
-        if requires_move_before_drop() {
-            ptr::read((*ptr).inner.get());
-        } else {
-            ptr::drop_in_place((*ptr).inner.get());
-        }
+        // Right before we run the user destructor be sure to set the
+        // `Option<T>` to `None`, and `dtor_state` to `RunningOrHasRun`. This
+        // causes future calls to `get` to run `try_initialize_drop` again,
+        // which will now fail, and return `None`.
+        let value = (*ptr).inner.take();
+        (*ptr).dtor_state.set(DtorState::RunningOrHasRun);
+        drop(value);
     }
 }
 
 #[doc(hidden)]
 pub mod os {
-    use crate::cell::{Cell, UnsafeCell};
+    use super::lazy::LazyKeyInner;
+    use crate::cell::Cell;
     use crate::fmt;
     use crate::marker;
     use crate::ptr;
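
At the `std::thread_local!` level, the tri-state `dtor_state` above is what lets `LocalKey::try_with` report an `AccessError` once a key's destructor has started on the current thread, instead of silently re-initializing it (plain `with` panics in that situation). A small usage sketch:

use std::cell::Cell;

thread_local! {
    static COUNTER: Cell<u32> = Cell::new(0);
}

fn bump() -> Option<u32> {
    // Returns None once COUNTER's destructor is running or has run on this
    // thread (the `DtorState::RunningOrHasRun` case above).
    COUNTER.try_with(|c| {
        c.set(c.get() + 1);
        c.get()
    }).ok()
}

fn main() {
    assert_eq!(bump(), Some(1));
    assert_eq!(bump(), Some(2));
}
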
@@ -432,8 +489,8 @@ pub mod os {
     unsafe impl<T> Sync for Key<T> { }
 
     struct Value<T: 'static> {
+        inner: LazyKeyInner<T>,
         key: &'static Key<T>,
-        value: UnsafeCell<Option<T>>,
     }
 
     impl<T: 'static> Key<T> {
@@ -444,24 +501,43 @@ pub mod os {
             }
         }
 
-        pub unsafe fn get(&'static self) -> Option<&'static UnsafeCell<Option<T>>> {
+        pub unsafe fn get(&'static self, init: fn() -> T) -> Option<&'static T> {
             let ptr = self.os.get() as *mut Value<T>;
-            if !ptr.is_null() {
-                if ptr as usize == 1 {
-                    return None
+            if ptr as usize > 1 {
+                match (*ptr).inner.get() {
+                    Some(ref value) => return Some(value),
+                    None => {},
                 }
-                return Some(&(*ptr).value);
+            }
+            self.try_initialize(init)
+        }
+
+        // `try_initialize` is only called once per os thread local variable,
+        // except in corner cases where thread_local dtors reference other
+        // thread_local's, or it is being recursively initialized.
+        unsafe fn try_initialize(&'static self, init: fn() -> T) -> Option<&'static T> {
+            let ptr = self.os.get() as *mut Value<T>;
+            if ptr as usize == 1 {
+                // destructor is running
+                return None
             }
 
-            // If the lookup returned null, we haven't initialized our own
-            // local copy, so do that now.
-            let ptr: Box<Value<T>> = box Value {
-                key: self,
-                value: UnsafeCell::new(None),
+            let ptr = if ptr.is_null() {
+                // If the lookup returned null, we haven't initialized our own
+                // local copy, so do that now.
+                let ptr: Box<Value<T>> = box Value {
+                    inner: LazyKeyInner::new(),
+                    key: self,
+                };
+                let ptr = Box::into_raw(ptr);
+                self.os.set(ptr as *mut u8);
+                ptr
+            } else {
+                // recursive initialization
+                ptr
             };
-            let ptr = Box::into_raw(ptr);
-            self.os.set(ptr as *mut u8);
-            Some(&(*ptr).value)
+
+            Some((*ptr).inner.initialize(init))
         }
     }

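The os-backed key encodes its whole lifecycle in the raw pointer stored in the OS TLS slot, which is what the `ptr as usize > 1` and `== 1` checks above decode. A sketch of that encoding (hypothetical helper, not part of the change):

// null  -> this thread has never initialized the key
// 1     -> the slot's destructor is running (or has run) on this thread
// other -> pointer to the heap-allocated Value<T>
fn classify(slot: usize) -> &'static str {
    match slot {
        0 => "uninitialized",
        1 => "destructor running",
        _ => "initialized",
    }
}

fn main() {
    assert_eq!(classify(0), "uninitialized");
    assert_eq!(classify(1), "destructor running");
    assert_eq!(classify(4096), "initialized");
}
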
src/test/compile-fail/issue-43733-2.rs

Lines changed: 3 additions & 3 deletions

@@ -5,15 +5,15 @@
 #[cfg(not(target_thread_local))]
 struct Key<T> {
     _data: std::cell::UnsafeCell<Option<T>>,
-    _flag: std::cell::Cell<bool>,
+    _flag: std::cell::Cell<()>,
 }
 
 #[cfg(not(target_thread_local))]
 impl<T> Key<T> {
     const fn new() -> Self {
         Key {
             _data: std::cell::UnsafeCell::new(None),
-            _flag: std::cell::Cell::new(false),
+            _flag: std::cell::Cell::new(()),
         }
     }
 }
@@ -23,6 +23,6 @@ use std::thread::__FastLocalKeyInner as Key;
 
 static __KEY: Key<()> = Key::new();
 //~^ ERROR `std::cell::UnsafeCell<std::option::Option<()>>` cannot be shared between threads
-//~| ERROR `std::cell::Cell<bool>` cannot be shared between threads safely [E0277]
+//~| ERROR cannot be shared between threads safely [E0277]
 
 fn main() {}

src/test/ui/issues/issue-43733.rs

Lines changed: 3 additions & 5 deletions

@@ -13,15 +13,13 @@ static __KEY: std::thread::__FastLocalKeyInner<Foo> =
 static __KEY: std::thread::__OsLocalKeyInner<Foo> =
     std::thread::__OsLocalKeyInner::new();
 
-fn __getit() -> std::option::Option<
-    &'static std::cell::UnsafeCell<
-        std::option::Option<Foo>>>
+fn __getit() -> std::option::Option<&'static Foo>
 {
-    __KEY.get() //~ ERROR call to unsafe function is unsafe
+    __KEY.get(Default::default) //~ ERROR call to unsafe function is unsafe
 }
 
 static FOO: std::thread::LocalKey<Foo> =
-    std::thread::LocalKey::new(__getit, Default::default);
+    std::thread::LocalKey::new(__getit);
 //~^ ERROR call to unsafe function is unsafe
 
 fn main() {

src/test/ui/issues/issue-43733.stderr

Lines changed: 6 additions & 6 deletions

@@ -1,16 +1,16 @@
 error[E0133]: call to unsafe function is unsafe and requires unsafe function or block
-  --> $DIR/issue-43733.rs:20:5
+  --> $DIR/issue-43733.rs:18:5
   |
-LL |     __KEY.get()
-   |     ^^^^^^^^^^^ call to unsafe function
+LL |     __KEY.get(Default::default)
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function
   |
   = note: consult the function's documentation for information on how to avoid undefined behavior
 
 error[E0133]: call to unsafe function is unsafe and requires unsafe function or block
-  --> $DIR/issue-43733.rs:24:5
+  --> $DIR/issue-43733.rs:22:5
   |
-LL |     std::thread::LocalKey::new(__getit, Default::default);
-   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function
+LL |     std::thread::LocalKey::new(__getit);
+   |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function
   |
   = note: consult the function's documentation for information on how to avoid undefined behavior
 