@@ -447,17 +447,29 @@ pub unsafe fn uninitialized<T>() -> T {
 #[stable(feature = "rust1", since = "1.0.0")]
 pub fn swap<T>(x: &mut T, y: &mut T) {
     unsafe {
+        // Give ourselves some scratch space to work with
+        let mut t: [u8; 16] = mem::uninitialized();
+
         let x = x as *mut T as *mut u8;
         let y = y as *mut T as *mut u8;
+        let t = &mut t as *mut _ as *mut u8;
 
         // can't use a for loop as the `range` impl calls `mem::swap` recursively
+        let len = size_of::<T>() as isize;
         let mut i = 0;
-        while i < size_of::<T>() as isize {
-            // use an xor-swap as x & y are guaranteed to never alias
-            *x.offset(i) ^= *y.offset(i);
-            *y.offset(i) ^= *x.offset(i);
-            *x.offset(i) ^= *y.offset(i);
-            i += 1;
+        while i + 16 <= len {
+            // Perform the swap 16 bytes at a time, `&mut` pointers never alias
+            ptr::copy_nonoverlapping(x.offset(i), t, 16);
+            ptr::copy_nonoverlapping(y.offset(i), x.offset(i), 16);
+            ptr::copy_nonoverlapping(t, y.offset(i), 16);
+            i += 16;
+        }
+        if i < len {
+            // Swap any remaining bytes
+            let rem = (len - i) as usize;
+            ptr::copy_nonoverlapping(x.offset(i), t, rem);
+            ptr::copy_nonoverlapping(y.offset(i), x.offset(i), rem);
+            ptr::copy_nonoverlapping(t, y.offset(i), rem);
         }
     }
 }
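
For illustration only (not part of this commit): a minimal caller-side sketch of the new path. A 20-byte value exercises both branches, one full 16-byte block swap followed by a 4-byte remainder swap, and `mem::swap` is the public stable API the diff modifies.

use std::mem;

fn main() {
    // 20 bytes: one 16-byte block plus a 4-byte remainder
    let mut a = [1u8; 20];
    let mut b = [2u8; 20];
    mem::swap(&mut a, &mut b);
    // Both buffers are exchanged in full, including the tail bytes
    assert_eq!(a, [2u8; 20]);
    assert_eq!(b, [1u8; 20]);
}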