Commit ffd2657

Add SAFETY comments and notes for unsafe blocks

1 parent 3065c37 · commit ffd2657

4 files changed (+122, -4)

src/header.rs (+14, -3)
```diff
@@ -62,8 +62,10 @@ impl<M> Header<M> {
         let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel);
 
         // If the task was not notifying or registering an awaiter...
+        // Note: The NOTIFYING and REGISTERING bits act as locks on the awaiter UnsafeCell.
         if state & (NOTIFYING | REGISTERING) == 0 {
             // Take the waker out.
+            // SAFETY: self.awaiter is tied to atomic ordering operations on self.state.
             let waker = unsafe { (*self.awaiter.get()).take() };
 
             // Unset the bit indicating that the task is notifying its awaiter.
@@ -92,9 +94,10 @@ impl<M> Header<M> {
         let mut state = self.state.fetch_or(0, Ordering::Acquire);
 
         loop {
-            // There can't be two concurrent registrations because `Task` can only be polled
-            // by a unique pinned reference.
-            debug_assert!(state & REGISTERING == 0);
+            // There can't be two concurrent registrations because `Task` can only be polled by a
+            // unique pinned reference. Enforcing this here instead of marking the whole function
+            // unsafe.
+            assert!(state & REGISTERING == 0);
 
             // If we're in the notifying state at this moment, just wake and return without
             // registering.
@@ -119,6 +122,8 @@ impl<M> Header<M> {
         }
 
         // Put the waker into the awaiter field.
+        // SAFETY: We have OR'd the state of the header with REGISTERING so we have a lock on
+        // self.awaiter and can write to it.
         unsafe {
             abort_on_panic(|| (*self.awaiter.get()) = Some(waker.clone()));
         }
@@ -130,6 +135,12 @@ impl<M> Header<M> {
         loop {
             // If there was a notification, take the waker out of the awaiter field.
             if state & NOTIFYING != 0 {
+                // SAFETY: We have guaranteed that self.state is or'd with NOTIFYING, which
+                // prevents everyone else from writing to self.awaiter. So we know that we won't
+                // race with any writes from other threads.
+                // We can't reach this branch on the first loop through, but we can if someone
+                // notifies the task while we are in the middle of registering. Normally, they
+                // would also take a waker, but they won't if we have the NOTIFYING bit set.
                 if let Some(w) = unsafe { (*self.awaiter.get()).take() } {
                     abort_on_panic(|| waker = Some(w));
                 }
```
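
The comments added here describe a bit-flag lock over an `UnsafeCell`: setting a bit with `fetch_or(_, AcqRel)` and observing that it was previously clear grants exclusive access to the cell until the bit is cleared. A minimal self-contained sketch of that pattern (toy names, not the crate's actual code):

```rust
use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::Waker;

const NOTIFYING: usize = 1 << 0;
const REGISTERING: usize = 1 << 1;

struct Slot {
    state: AtomicUsize,
    waker: UnsafeCell<Option<Waker>>,
}

// SAFETY (sketch): all access to `waker` is guarded by the state bits below.
unsafe impl Sync for Slot {}

impl Slot {
    /// Take the stored waker, but only if no other thread holds a "lock" bit.
    fn notify(&self) -> Option<Waker> {
        // Setting NOTIFYING and seeing both bits previously clear means we
        // now have exclusive access to `self.waker`.
        let state = self.state.fetch_or(NOTIFYING, Ordering::AcqRel);
        if state & (NOTIFYING | REGISTERING) == 0 {
            // SAFETY: the NOTIFYING bit we just set excludes all other writers.
            let waker = unsafe { (*self.waker.get()).take() };
            // Release the "lock" so registrations can proceed again.
            self.state.fetch_and(!NOTIFYING, Ordering::Release);
            waker
        } else {
            None
        }
    }
}
```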

src/raw.rs (+69, -1)
```diff
@@ -123,6 +123,9 @@ impl<F, T, S, M> RawTask<F, T, S, M> {
         let offset_r = offset_union;
 
         TaskLayout {
+            // SAFETY: layout came from a Layout::extend call, which dynamically checks the
+            // invariants for StdLayout and returns None if they are not met. The leap_unwrap!
+            // would have panicked before this point.
             layout: unsafe { layout.into_std() },
             offset_s,
             offset_f,
```
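
For context, `Layout::extend` is the standard-library way to compute such a C-style layout and the field offsets in one pass. A minimal sketch of the idea using plain `std::alloc::Layout` (illustrative stand-in type parameters, not the crate's internal layout helpers):

```rust
use std::alloc::Layout;

/// Compute the layout of a header `H`, followed by a schedule function `S`,
/// followed by a union of future `F` and output `T`, returning the combined
/// layout plus the offsets of the second and third fields.
fn task_layout<H, S, F, T>() -> Option<(Layout, usize, usize)> {
    let header = Layout::new::<H>();
    let schedule = Layout::new::<S>();
    // The union of F and T occupies the larger size at the stricter alignment.
    let f = Layout::new::<F>();
    let t = Layout::new::<T>();
    let union = Layout::from_size_align(f.size().max(t.size()), f.align().max(t.align())).ok()?;

    // extend() returns the grown layout and the offset of the appended field,
    // or an error on overflow; pad_to_align() rounds the total size up.
    let (layout, offset_s) = header.extend(schedule).ok()?;
    let (layout, offset_union) = layout.extend(union).ok()?;
    Some((layout.pad_to_align(), offset_s, offset_union))
}
```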
```diff
@@ -167,6 +170,8 @@ where
                 Some(p) => p,
             };
 
+            // SAFETY: task_layout.layout has the correct layout for a C-style struct of Header
+            // followed by S followed by union { F, T }.
             let raw = Self::from_ptr(ptr.as_ptr());
 
             let crate::Builder {
@@ -176,6 +181,10 @@ where
             } = builder;
 
             // Write the header as the first field of the task.
+            // SAFETY: This write is OK because it's through a mutable pointer to a Header<M> that
+            // is definitely properly aligned and points to enough memory for a Header<M>. We
+            // didn't pass our pointer through any const references or other const-ifying
+            // operations so the provenance is good.
             (raw.header as *mut Header<M>).write(Header {
                 state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE),
                 awaiter: UnsafeCell::new(None),
@@ -195,25 +204,37 @@ where
             });
 
             // Write the schedule function as the third field of the task.
+            // SAFETY: raw.schedule is also non-null, properly aligned, valid for writes of size
+            // size_of::<Schedule>().
             (raw.schedule as *mut S).write(schedule);
 
             // Generate the future, now that the metadata has been pinned in place.
+            // SAFETY: Dereferencing raw.header is OK because it's properly initialized since we
+            // wrote to it.
             let future = abort_on_panic(|| future(&(*raw.header).metadata));
 
             // Write the future as the fourth field of the task.
+            // SAFETY: This write is OK because raw.future is non-null, properly-aligned, and valid
+            // for writes of size F. Because we're not casting anything here we know it's the right
+            // type.
             raw.future.write(future);
 
             ptr
         }
     }
 
     /// Creates a `RawTask` from a raw task pointer.
+    ///
+    /// ptr must point to a region that has a size and alignment matching task layout, since doing
+    /// pointer arithmetic that leaves the region or creating unaligned pointers is UB.
     #[inline]
-    pub(crate) fn from_ptr(ptr: *const ()) -> Self {
+    pub(crate) unsafe fn from_ptr(ptr: *const ()) -> Self {
         let task_layout = Self::task_layout();
         let p = ptr as *const u8;
 
         unsafe {
+            // SAFETY: We're just picking apart the given pointer into its constituent fields.
+            // These do correctly correspond to the fields as laid out in task_layout.
            Self {
                 header: p as *const Header<M>,
                 schedule: p.add(task_layout.offset_s) as *const S,
```
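
The `allocate`/`from_ptr` pair is the classic "one allocation, hand-computed field offsets" pattern: write each field through a typed raw pointer, then recover the fields later from the base pointer plus the recorded offsets. A self-contained sketch with concrete, hypothetical types (not the crate's code):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

// Hypothetical header type for the sketch.
struct DemoHeader {
    refcount: usize,
}

fn main() {
    // One heap block holding a DemoHeader followed by an f64 payload.
    let header = Layout::new::<DemoHeader>();
    let (layout, offset_payload) = header.extend(Layout::new::<f64>()).unwrap();
    let layout = layout.pad_to_align();

    unsafe {
        // SAFETY: layout has non-zero size.
        let ptr = alloc(layout);
        if ptr.is_null() {
            handle_alloc_error(layout);
        }

        // SAFETY: ptr is properly aligned for DemoHeader and the block is large
        // enough, so each field can be written through a typed pointer.
        (ptr as *mut DemoHeader).write(DemoHeader { refcount: 1 });
        (ptr.add(offset_payload) as *mut f64).write(3.14);

        // SAFETY: the same base pointer and offset recover the field, which is
        // exactly what from_ptr does with task_layout's offsets.
        assert_eq!(*(ptr.add(offset_payload) as *const f64), 3.14);

        // SAFETY: deallocating with the same layout the block was allocated with.
        dealloc(ptr, layout);
    }
}
```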
```diff
@@ -229,6 +250,8 @@ where
         Self::TASK_LAYOUT
     }
     /// Wakes a waker.
+    ///
+    /// Assumes ptr points to a valid task.
     unsafe fn wake(ptr: *const ()) {
         // This is just an optimization. If the schedule function has captured variables, then
         // we'll do less reference counting if we wake the waker by reference and then drop it.
@@ -240,6 +263,8 @@ where
 
         let raw = Self::from_ptr(ptr);
 
+        // SAFETY: This is just loading the state. Note that this does implicitly create an
+        // &AtomicUsize, which is intentional.
         let mut state = (*raw.header).state.load(Ordering::Acquire);
 
         loop {
@@ -295,6 +320,8 @@ where
     }
 
     /// Wakes a waker by reference.
+    ///
+    /// Assumes ptr points to a valid task.
     unsafe fn wake_by_ref(ptr: *const ()) {
         let raw = Self::from_ptr(ptr);
 
@@ -346,6 +373,8 @@ where
                     // because the schedule function cannot be destroyed while the waker is
                     // still alive.
                     let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+                    // SAFETY: The task is still alive, so we can call its schedule
+                    // function.
                     (*raw.schedule).schedule(task, ScheduleInfo::new(false));
                 }
 
```

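These `wake`/`wake_by_ref` functions are entries of a manually built waker vtable. A minimal self-contained sketch of how such functions get wired into a `Waker` (toy no-op entries, not the crate's implementation):

```rust
use std::task::{RawWaker, RawWakerVTable, Waker};

// Toy vtable entries: each receives the opaque task pointer the waker carries.
unsafe fn clone(ptr: *const ()) -> RawWaker {
    RawWaker::new(ptr, &VTABLE) // A real impl would bump a refcount here.
}
unsafe fn wake(_ptr: *const ()) {}        // Consume the waker and reschedule.
unsafe fn wake_by_ref(_ptr: *const ()) {} // Reschedule without consuming.
unsafe fn drop_waker(_ptr: *const ()) {}  // A real impl would drop a refcount.

static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker);

fn main() {
    // SAFETY: the vtable contract is trivially upheld by these no-op entries.
    let waker = unsafe { Waker::from_raw(RawWaker::new(std::ptr::null(), &VTABLE)) };
    waker.wake_by_ref();
}
```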
```diff
@@ -394,9 +423,17 @@ where
                 (*raw.header)
                     .state
                     .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release);
+                // SAFETY: ptr still points to a valid task even though its refcount has dropped
+                // to zero.
+                // NOTE: We should make sure that the executor is properly dropping scheduled tasks
+                // with a refcount of zero.
                 Self::schedule(ptr, ScheduleInfo::new(false));
             } else {
                 // Otherwise, destroy the task right away.
+                // NOTE: This isn't going to drop the output/result from the future. We have to
+                // have already dealt with it, so whoever is calling drop_waker needs to be
+                // checked. It looks like whoever sets the TASK bit to zero is affirming that they
+                // have moved or dropped the output/result.
                 Self::destroy(ptr);
             }
         }
@@ -435,6 +472,8 @@ where
         }
 
         let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+        // NOTE: The schedule function has to drop tasks with a refcount of zero. That's not
+        // happening in this function, so it has to be happening in the schedule member function.
         (*raw.schedule).schedule(task, info);
     }
 
```

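The notes above put a real obligation on the schedule function, which is supplied by the executor through async-task's public API. For orientation, this is the typical shape of that schedule function, as a minimal sketch assuming the `flume` and `futures-lite` crates alongside `async-task`:

```rust
fn main() {
    // The executor's run queue: scheduling a task just sends its Runnable here.
    let (sender, receiver) = flume::unbounded();
    let schedule = move |runnable| sender.send(runnable).unwrap();

    // Spawn a future together with the schedule function.
    let (runnable, task) = async_task::spawn(async { 1 + 2 }, schedule);

    // Schedule the task for its first poll.
    runnable.schedule();

    // A tiny executor loop: run tasks until the queue drains.
    while let Ok(runnable) = receiver.try_recv() {
        runnable.run();
    }

    // The Task handle is a Future that resolves to the output.
    assert_eq!(futures_lite::future::block_on(task), 3);
}
```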
```diff
@@ -459,6 +498,9 @@ where
     ///
     /// The schedule function will be dropped, and the task will then get deallocated.
     /// The task must be closed before this function is called.
+    ///
+    /// NOTE: Whoever calls this function has to have already dealt with the return value of the
+    /// future or its error if it failed. We are not going to drop it!
     #[inline]
     unsafe fn destroy(ptr: *const ()) {
         let raw = Self::from_ptr(ptr);
@@ -467,13 +509,18 @@ where
         // We need a safeguard against panics because destructors can panic.
         abort_on_panic(|| {
             // Drop the header along with the metadata.
+            // SAFETY: This points to a valid Header<M> that we have permission to move out of and
+            // drop.
             (raw.header as *mut Header<M>).drop_in_place();
 
             // Drop the schedule function.
+            // SAFETY: This points to a valid S that we have permission to move out of and drop.
             (raw.schedule as *mut S).drop_in_place();
         });
 
         // Finally, deallocate the memory reserved by the task.
+        // SAFETY: We know that ptr was allocated with layout task_layout.layout, so deallocating
+        // it with the same layout is correct.
         alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout);
     }
 
```

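`destroy`'s drop-then-deallocate split is the standard way to end a manually allocated object's lifetime: run destructors in place, then release the memory with the original layout. A small sketch of the two steps (hypothetical `String` field, not the crate's code):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

fn main() {
    let layout = Layout::new::<String>();
    unsafe {
        // SAFETY: layout has non-zero size.
        let ptr = alloc(layout) as *mut String;
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        ptr.write(String::from("owned heap data"));

        // Step 1: run the destructor in place without reading the value out.
        ptr.drop_in_place();

        // Step 2: release the memory with the same layout it was allocated with.
        dealloc(ptr as *mut u8, layout);
    }
}
```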
```diff
@@ -482,9 +529,11 @@ where
     /// If polling its future panics, the task will be closed and the panic will be propagated into
     /// the caller.
     unsafe fn run(ptr: *const ()) -> bool {
+        // SAFETY: As long as it's a pointer to a valid task, we can get the raw form of it.
         let raw = Self::from_ptr(ptr);
 
         // Create a context from the raw task pointer and the vtable inside its header.
+        // SAFETY: The implementation of RAW_WAKER_VTABLE is correct.
         let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)));
         let cx = &mut Context::from_waker(&waker);
 
@@ -507,6 +556,8 @@ where
             }
 
             // Drop the task reference.
+            // SAFETY: This pointer is definitely alive. The Waker that is registered into the
+            // executor holds it.
             Self::drop_ref(ptr);
 
             // Notify the awaiter that the future has been dropped.
@@ -563,7 +614,10 @@ where
         match poll {
             Poll::Ready(out) => {
                 // Replace the future with its output.
+                // SAFETY: We have exclusive access to the task so we can drop the future for it.
                 Self::drop_future(ptr);
+                // SAFETY: raw.output definitely points to a valid memory location to hold the
+                // Output type of the future.
                 raw.output.write(out);
 
                 // The task is now completed.
@@ -593,10 +647,12 @@ where
                 // Take the awaiter out.
                 let mut awaiter = None;
                 if state & AWAITER != 0 {
+                    // SAFETY: This is safe for the same reasons as we said earlier.
                     awaiter = (*raw.header).take(None);
                 }
 
                 // Drop the task reference.
+                // SAFETY: We "own" the ref to this task and are allowed to drop it.
                 Self::drop_ref(ptr);
 
                 // Notify the awaiter that the future has been dropped.
@@ -625,6 +681,9 @@ where
                     if state & CLOSED != 0 && !future_dropped {
                         // The thread that closed the task didn't drop the future because it was
                         // running so now it's our responsibility to do so.
+                        // SAFETY: This is corroborated by header.rs where they state that closing
+                        // a task doesn't drop the future, it just marks it closed and puts it back
+                        // in the polling queue so a poller can drop it.
                         Self::drop_future(ptr);
                         future_dropped = true;
                     }
@@ -648,6 +707,8 @@ where
                         }
 
                         // Drop the task reference.
+                        // SAFETY: We're allowed to drop the ref as stated earlier. We
+                        // checked that it won't accidentally be double-dropped.
                         Self::drop_ref(ptr);
 
                         // Notify the awaiter that the future has been dropped.
@@ -657,10 +718,13 @@ where
                     } else if state & SCHEDULED != 0 {
                         // The thread that woke the task up didn't reschedule it because
                         // it was running so now it's our responsibility to do so.
+                        // SAFETY: ptr definitely points to a valid task that hasn't been
+                        // dropped. It has its SCHEDULED bit set.
                         Self::schedule(ptr, ScheduleInfo::new(true));
                         return true;
                     } else {
                         // Drop the task reference.
+                        // SAFETY: We're still allowed.
                         Self::drop_ref(ptr);
                     }
                     break;
@@ -697,6 +761,7 @@ where
                 if state & CLOSED != 0 {
                     // The thread that closed the task didn't drop the future because it
                     // was running so now it's our responsibility to do so.
+                    // SAFETY: If poll panicked then the thread didn't drop the future.
                     RawTask::<F, T, S, M>::drop_future(ptr);
 
                     // Mark the task as not running and not scheduled.
@@ -711,6 +776,7 @@ where
                 }
 
                 // Drop the task reference.
+                // SAFETY: We still have permission to drop a ref.
                 RawTask::<F, T, S, M>::drop_ref(ptr);
 
                 // Notify the awaiter that the future has been dropped.
@@ -729,6 +795,8 @@ where
             ) {
                 Ok(state) => {
                     // Drop the future because the task is now closed.
+                    // SAFETY: This is effectively the same situation as earlier.
+                    // TODO: DRY this up by refactoring this.
                     RawTask::<F, T, S, M>::drop_future(ptr);
 
                     // Take the awaiter out.
```

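Much of the SAFETY reasoning in `run` rests on the state word: every transition is a compare-and-swap loop over bit flags, so exactly one thread wins any given transition and thereby earns the exclusive access the comments claim. A generic sketch of that loop shape (illustrative flag values, not the crate's real constants):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const SCHEDULED: usize = 1 << 0;
const RUNNING: usize = 1 << 1;
const CLOSED: usize = 1 << 2;

/// Transition a task from scheduled to running, in the same CAS-loop shape
/// the diff's `run` uses. Returns false if the task is already closed.
fn start_running(state: &AtomicUsize) -> bool {
    let mut current = state.load(Ordering::Acquire);
    loop {
        if current & CLOSED != 0 {
            return false; // A closed task must not be polled again.
        }
        let next = (current & !SCHEDULED) | RUNNING;
        match state.compare_exchange_weak(current, next, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => return true,            // We won the transition.
            Err(actual) => current = actual, // Raced with another thread; retry.
        }
    }
}
```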