@@ -123,6 +123,9 @@ impl<F, T, S, M> RawTask<F, T, S, M> {
         let offset_r = offset_union;
 
         TaskLayout {
+            // SAFETY: layout came from a Layout::extend call, which dynamically checks the
+            // invariants for StdLayout and returns None if they are not met. The leap_unwrap!
+            // would have panicked before this point.
            layout: unsafe { layout.into_std() },
            offset_s,
            offset_f,
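For context on what that SAFETY comment leans on: the crate's const-friendly layout helper mirrors `std::alloc::Layout::extend`, which computes each appended field's offset while checking the size and alignment invariants. A minimal sketch using the std API (names are illustrative, and std signals failure through a `Result` where the crate's helper returns `None` into `leap_unwrap!`):

```rust
use std::alloc::Layout;

// Stand-in header type; the real Header<M> lives in this crate.
struct Header {
    state: usize,
}

// Compute a C-style layout of Header, then S, then F, recording where
// each field starts.
fn task_layout_sketch<S, F>() -> (Layout, usize, usize) {
    let layout = Layout::new::<Header>();
    let (layout, offset_s) = layout.extend(Layout::new::<S>()).unwrap();
    let (layout, offset_f) = layout.extend(Layout::new::<F>()).unwrap();
    // pad_to_align mirrors the trailing padding a #[repr(C)] struct would get.
    (layout.pad_to_align(), offset_s, offset_f)
}

fn main() {
    let (layout, offset_s, offset_f) = task_layout_sketch::<fn(), u64>();
    println!("{layout:?}, schedule at {offset_s}, future at {offset_f}");
}
```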
@@ -167,6 +170,8 @@ where
                 Some(p) => p,
             };
 
+            // SAFETY: task_layout.layout has the correct layout for a C-style struct of Header
+            // followed by S followed by union { F, T }.
             let raw = Self::from_ptr(ptr.as_ptr());
 
             let crate::Builder {
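Spelled out, the "C-style struct" that SAFETY comment describes is roughly the following shape. These are hypothetical stand-in declarations; the crate addresses the fields through raw offsets rather than declaring such a type:

```rust
use core::mem::ManuallyDrop;
use core::sync::atomic::AtomicUsize;

// Stand-in for the crate's Header<M>.
struct Header<M> {
    state: AtomicUsize,
    metadata: M,
}

// The allocation behaves as if it were this #[repr(C)] struct: header
// first, then the schedule function, then storage shared by the future
// and its output.
#[repr(C)]
struct TaskAllocation<M, S, F, T> {
    header: Header<M>,
    schedule: S,
    stage: FutureOrOutput<F, T>,
}

#[repr(C)]
union FutureOrOutput<F, T> {
    future: ManuallyDrop<F>,
    output: ManuallyDrop<T>,
}
```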
@@ -176,6 +181,10 @@ where
             } = builder;
 
             // Write the header as the first field of the task.
+            // SAFETY: This write is OK because it's through a mutable pointer to a Header<M> that
+            // is definitely properly aligned and points to enough memory for a Header<M>. We
+            // didn't pass our pointer through any const references or other const-ifying
+            // operations so the provenance is good.
             (raw.header as *mut Header<M>).write(Header {
                 state: AtomicUsize::new(SCHEDULED | TASK | REFERENCE),
                 awaiter: UnsafeCell::new(None),
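The provenance remark is the subtle part: the pointer used for the write must keep the allocator's write permission, which it would lose if it were ever re-derived from a shared reference. A toy version of the same pattern:

```rust
use std::alloc::{alloc, dealloc, Layout};

struct Header {
    refcount: usize,
}

fn main() {
    let layout = Layout::new::<Header>();
    unsafe {
        // Keep the original *mut from the allocator and write through it
        // directly; no detour through &Header, so write provenance is intact.
        let p = alloc(layout) as *mut Header;
        assert!(!p.is_null());
        p.write(Header { refcount: 1 }); // placement write: no read or drop of old bytes
        assert_eq!((*p).refcount, 1);
        p.drop_in_place();
        dealloc(p as *mut u8, layout);
    }
}
```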
@@ -195,25 +204,37 @@ where
             });
 
             // Write the schedule function as the third field of the task.
+            // SAFETY: raw.schedule is also non-null, properly aligned, and valid for writes of
+            // size size_of::<S>().
             (raw.schedule as *mut S).write(schedule);
 
             // Generate the future, now that the metadata has been pinned in place.
+            // SAFETY: Dereferencing raw.header is OK because it's properly initialized since we
+            // wrote to it.
             let future = abort_on_panic(|| future(&(*raw.header).metadata));
 
             // Write the future as the fourth field of the task.
+            // SAFETY: This write is OK because raw.future is non-null, properly aligned, and
+            // valid for writes of size size_of::<F>(). Because we're not casting anything here
+            // we know it's the right type.
             raw.future.write(future);
 
             ptr
         }
     }
 
     /// Creates a `RawTask` from a raw task pointer.
+    ///
+    /// ptr must point to a region that has a size and alignment matching the task layout, since
+    /// doing pointer arithmetic that leaves the region or creating unaligned pointers is UB.
     #[inline]
-    pub(crate) fn from_ptr(ptr: *const ()) -> Self {
+    pub(crate) unsafe fn from_ptr(ptr: *const ()) -> Self {
         let task_layout = Self::task_layout();
         let p = ptr as *const u8;
 
         unsafe {
+            // SAFETY: We're just picking apart the given pointer into its constituent fields.
+            // These do correctly correspond to the fields as laid out in task_layout.
             Self {
                 header: p as *const Header<M>,
                 schedule: p.add(task_layout.offset_s) as *const S,
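`from_ptr` is the inverse of the layout computation: it re-derives each typed field pointer from the base address using the stored offsets. A sketch of the pattern, with placeholder type parameters and a single offset:

```rust
// Carve typed field pointers out of one allocation by byte offset. Per the
// new doc comment, the caller must guarantee that base points to a region
// at least as large and as aligned as the computed layout, or the add()
// and the casts below are UB.
unsafe fn carve_fields<H, S>(base: *const u8, offset_s: usize) -> (*const H, *const S) {
    (base as *const H, base.add(offset_s) as *const S)
}
```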
@@ -229,6 +250,8 @@ where
         Self::TASK_LAYOUT
     }
     /// Wakes a waker.
+    ///
+    /// Assumes ptr points to a valid task.
     unsafe fn wake(ptr: *const ()) {
         // This is just an optimization. If the schedule function has captured variables, then
         // we'll do less reference counting if we wake the waker by reference and then drop it.
@@ -240,6 +263,8 @@ where
 
         let raw = Self::from_ptr(ptr);
 
+        // SAFETY: This is just loading the state. Note that this does implicitly create an
+        // &AtomicUsize, which is intentional.
         let mut state = (*raw.header).state.load(Ordering::Acquire);
 
         loop {
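That load feeds the compare-and-swap loop that follows. The shape of such a loop, with illustrative flag constants rather than the crate's actual bit assignments:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const SCHEDULED: usize = 1 << 0;
const RUNNING: usize = 1 << 1;

// Load the state once, then CAS until a transition sticks; on failure the
// CAS hands back the freshly observed state for the next iteration.
fn mark_scheduled(state: &AtomicUsize) -> bool {
    let mut s = state.load(Ordering::Acquire);
    loop {
        if s & (SCHEDULED | RUNNING) != 0 {
            return false; // someone else will see to it being polled
        }
        match state.compare_exchange_weak(s, s | SCHEDULED, Ordering::AcqRel, Ordering::Acquire) {
            Ok(_) => return true,
            Err(actual) => s = actual,
        }
    }
}
```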
@@ -295,6 +320,8 @@ where
     }
 
     /// Wakes a waker by reference.
+    ///
+    /// Assumes ptr points to a valid task.
     unsafe fn wake_by_ref(ptr: *const ()) {
         let raw = Self::from_ptr(ptr);
 
@@ -346,6 +373,8 @@ where
                                 // because the schedule function cannot be destroyed while the waker is
                                 // still alive.
                                 let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+                                // SAFETY: The task is still alive, so we can call its schedule
+                                // function.
                                 (*raw.schedule).schedule(task, ScheduleInfo::new(false));
                             }
 
@@ -394,9 +423,17 @@ where
                 (*raw.header)
                     .state
                     .store(SCHEDULED | CLOSED | REFERENCE, Ordering::Release);
+                // SAFETY: ptr still points to a valid task even though its refcount has dropped
+                // to zero.
+                // NOTE: We should make sure that the executor is properly dropping scheduled tasks
+                // with a refcount of zero.
                 Self::schedule(ptr, ScheduleInfo::new(false));
             } else {
                 // Otherwise, destroy the task right away.
+                // NOTE: This isn't going to drop the output/result from the future. We have to
+                // have already dealt with it, so whoever is calling drop_waker needs to be
+                // checked. It looks like whoever sets the TASK bit to zero is affirming that they
+                // have moved or dropped the output/result.
                 Self::destroy(ptr);
             }
         }
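On the refcount NOTEs: the reference count is packed into the same atomic state word as the flags, so dropping a reference and deciding who destroys the task is a single `fetch_sub`. A sketch of that protocol under assumed bit positions (not the crate's actual constants):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Assumed bit assignment for this sketch: the low bits are flags, and each
// reference adds one REFERENCE unit to the upper bits.
const REFERENCE: usize = 1 << 4;

/// Returns true if the caller removed the last reference and therefore
/// owns destruction of the task.
fn drop_ref_sketch(state: &AtomicUsize) -> bool {
    // fetch_sub returns the previous value; mask off the flag bits to see
    // whether ours was the last reference.
    let prev = state.fetch_sub(REFERENCE, Ordering::AcqRel);
    (prev & !(REFERENCE - 1)) == REFERENCE
}
```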
@@ -435,6 +472,8 @@ where
         }
 
         let task = Runnable::from_raw(NonNull::new_unchecked(ptr as *mut ()));
+        // NOTE: The schedule function has to drop tasks with a refcount of zero. That's not
+        // happening in this function, so it has to be happening in the schedule member function.
         (*raw.schedule).schedule(task, info);
     }
 
@@ -459,6 +498,9 @@ where
     ///
     /// The schedule function will be dropped, and the task will then get deallocated.
     /// The task must be closed before this function is called.
+    ///
+    /// NOTE: Whoever calls this function has to have already dealt with the return value of the
+    /// future or its error if it failed. We are not going to drop it!
     #[inline]
     unsafe fn destroy(ptr: *const ()) {
         let raw = Self::from_ptr(ptr);
@@ -467,13 +509,18 @@ where
         // We need a safeguard against panics because destructors can panic.
         abort_on_panic(|| {
             // Drop the header along with the metadata.
+            // SAFETY: This points to a valid Header<M> that we have permission to move out of
+            // and drop.
             (raw.header as *mut Header<M>).drop_in_place();
 
             // Drop the schedule function.
+            // SAFETY: This points to a valid S that we have permission to move out of and drop.
             (raw.schedule as *mut S).drop_in_place();
         });
 
         // Finally, deallocate the memory reserved by the task.
+        // SAFETY: We know that ptr was allocated with layout task_layout.layout, so
+        // deallocating it with the same layout is correct.
         alloc::alloc::dealloc(ptr as *mut u8, task_layout.layout);
     }
 
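Reduced to its essentials, destroy's obligation is: run each field's destructor in place, then hand the memory back with exactly the layout it was allocated with. A sketch with stand-in type parameters and a hypothetical signature:

```rust
use std::alloc::{dealloc, Layout};
use std::ptr;

// Teardown order for a task-like allocation: drop the header, drop the
// schedule function at its offset, then free the block. Passing dealloc
// anything other than the original layout would be UB.
unsafe fn destroy_sketch<H, S>(base: *mut u8, offset_s: usize, layout: Layout) {
    ptr::drop_in_place(base as *mut H); // drop the header in place
    ptr::drop_in_place(base.add(offset_s) as *mut S); // drop the schedule fn
    dealloc(base, layout); // same pointer, same layout as the allocation
}
```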
@@ -482,9 +529,11 @@ where
     /// If polling its future panics, the task will be closed and the panic will be propagated into
     /// the caller.
     unsafe fn run(ptr: *const ()) -> bool {
+        // SAFETY: As long as it's a pointer to a valid task, we can get the raw form of it.
         let raw = Self::from_ptr(ptr);
 
         // Create a context from the raw task pointer and the vtable inside its header.
+        // SAFETY: The implementation of RAW_WAKER_VTABLE is correct.
         let waker = ManuallyDrop::new(Waker::from_raw(RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)));
         let cx = &mut Context::from_waker(&waker);
 
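"The implementation of RAW_WAKER_VTABLE is correct" carries a lot of weight here, since `Waker::from_raw` is unsafe precisely because the vtable's contract cannot be checked by the compiler. For reference, the smallest sound vtable looks like this; it is a no-op waker, not the crate's refcounting one:

```rust
use std::ptr;
use std::task::{RawWaker, RawWakerVTable, Waker};

// All four entries must uphold Waker's contract: clone preserves whatever
// resource invariant the data pointer implies, and the drop hook releases
// what clone acquired. For a no-op waker there is nothing to manage.
unsafe fn clone(data: *const ()) -> RawWaker {
    RawWaker::new(data, &NOOP_VTABLE)
}
unsafe fn wake(_data: *const ()) {}
unsafe fn wake_by_ref(_data: *const ()) {}
unsafe fn drop_waker(_data: *const ()) {}

static NOOP_VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop_waker);

fn noop_waker() -> Waker {
    // SAFETY: every vtable entry above is sound for a null data pointer.
    unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &NOOP_VTABLE)) }
}
```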
@@ -507,6 +556,8 @@ where
             }
 
             // Drop the task reference.
+            // SAFETY: This pointer is definitely alive. The Waker that is registered into the
+            // executor holds it.
             Self::drop_ref(ptr);
 
             // Notify the awaiter that the future has been dropped.
@@ -563,7 +614,10 @@ where
         match poll {
             Poll::Ready(out) => {
                 // Replace the future with its output.
+                // SAFETY: We have exclusive access to the task so we can drop the future for it.
                 Self::drop_future(ptr);
+                // SAFETY: raw.output definitely points to a valid memory location to hold the
+                // Output type of the future.
                 raw.output.write(out);
 
                 // The task is now completed.
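The drop-then-write pair is the shared future/output storage changing state. In terms of the union sketched earlier (the `Stage` type here is a stand-in for the crate's internals), the transition is:

```rust
use core::mem::ManuallyDrop;
use core::ptr;

#[repr(C)]
union Stage<F, T> {
    future: ManuallyDrop<F>,
    output: ManuallyDrop<T>,
}

// On Poll::Ready: drop the live future in place, then reuse the same
// storage for the output. Caller must know the future field is the one
// currently live.
unsafe fn complete<F, T>(stage: *mut Stage<F, T>, out: T) {
    // ManuallyDrop is repr(transparent), so casting its address to *mut F
    // is sound.
    ptr::drop_in_place(ptr::addr_of_mut!((*stage).future) as *mut F);
    ptr::addr_of_mut!((*stage).output).write(ManuallyDrop::new(out));
}
```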
@@ -593,10 +647,12 @@ where
                             // Take the awaiter out.
                             let mut awaiter = None;
                             if state & AWAITER != 0 {
+                                // SAFETY: This is safe for the same reasons as we said earlier.
                                 awaiter = (*raw.header).take(None);
                             }
 
                             // Drop the task reference.
+                            // SAFETY: We "own" the ref to this task and are allowed to drop it.
                             Self::drop_ref(ptr);
 
                             // Notify the awaiter that the future has been dropped.
@@ -625,6 +681,9 @@ where
                     if state & CLOSED != 0 && !future_dropped {
                         // The thread that closed the task didn't drop the future because it was
                         // running so now it's our responsibility to do so.
+                        // SAFETY: This is corroborated by header.rs where they state that closing
+                        // a task doesn't drop the future, it just marks it closed and puts it back
+                        // in the polling queue so a poller can drop it.
                         Self::drop_future(ptr);
                         future_dropped = true;
                     }
@@ -648,6 +707,8 @@ where
                                 }
 
                                 // Drop the task reference.
+                                // SAFETY: We're allowed to drop the ref as stated earlier. We
+                                // checked that it won't accidentally be double-dropped.
                                 Self::drop_ref(ptr);
 
                                 // Notify the awaiter that the future has been dropped.
@@ -657,10 +718,13 @@ where
                             } else if state & SCHEDULED != 0 {
                                 // The thread that woke the task up didn't reschedule it because
                                 // it was running so now it's our responsibility to do so.
+                                // SAFETY: ptr definitely points to a valid task that hasn't been
+                                // dropped. It has its SCHEDULED bit set.
                                 Self::schedule(ptr, ScheduleInfo::new(true));
                                 return true;
                             } else {
                                 // Drop the task reference.
+                                // SAFETY: We're still allowed.
                                 Self::drop_ref(ptr);
                             }
                             break;
@@ -697,6 +761,7 @@ where
                         if state & CLOSED != 0 {
                             // The thread that closed the task didn't drop the future because it
                             // was running so now it's our responsibility to do so.
+                            // SAFETY: If poll panicked then the thread didn't drop the future.
                             RawTask::<F, T, S, M>::drop_future(ptr);
 
                             // Mark the task as not running and not scheduled.
@@ -711,6 +776,7 @@ where
                             }
 
                             // Drop the task reference.
+                            // SAFETY: We still have permission to drop a ref.
                             RawTask::<F, T, S, M>::drop_ref(ptr);
 
                             // Notify the awaiter that the future has been dropped.
@@ -729,6 +795,8 @@ where
                             ) {
                                 Ok(state) => {
                                     // Drop the future because the task is now closed.
+                                    // SAFETY: This is effectively the same situation as earlier.
+                                    // TODO: DRY this up by refactoring this.
                                     RawTask::<F, T, S, M>::drop_future(ptr);
 
                                     // Take the awaiter out.