@@ -18605,7 +18605,7 @@ pub fn _mm_maskz_compress_pd(k: __mmask8, a: __m128d) -> __m128d {
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressd))]
-pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16, a: __m512i) {
+pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask16, a: __m512i) {
     vcompressstored(base_addr as *mut _, a.as_i32x16(), k)
 }
 
@@ -18616,7 +18616,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16,
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressd))]
-pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m256i) {
+pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask8, a: __m256i) {
     vcompressstored256(base_addr as *mut _, a.as_i32x8(), k)
 }
 
@@ -18627,7 +18627,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8,
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressd))]
-pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m128i) {
+pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask8, a: __m128i) {
     vcompressstored128(base_addr as *mut _, a.as_i32x4(), k)
 }
 
@@ -18638,7 +18638,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a:
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressq))]
-pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m512i) {
+pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m512i) {
     vcompressstoreq(base_addr as *mut _, a.as_i64x8(), k)
 }
 
@@ -18649,7 +18649,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressq))]
-pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m256i) {
+pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m256i) {
     vcompressstoreq256(base_addr as *mut _, a.as_i64x4(), k)
 }
 
@@ -18660,7 +18660,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vpcompressq))]
-pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m128i) {
+pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m128i) {
     vcompressstoreq128(base_addr as *mut _, a.as_i64x2(), k)
 }
 
@@ -18671,7 +18671,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a:
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompressps))]
-pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a: __m512) {
+pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask16, a: __m512) {
     vcompressstoreps(base_addr as *mut _, a.as_f32x16(), k)
 }
 
@@ -18682,7 +18682,7 @@ pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a:
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompressps))]
-pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m256) {
+pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask8, a: __m256) {
     vcompressstoreps256(base_addr as *mut _, a.as_f32x8(), k)
 }
 
@@ -18693,7 +18693,7 @@ pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a:
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompressps))]
-pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m128) {
+pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask8, a: __m128) {
     vcompressstoreps128(base_addr as *mut _, a.as_f32x4(), k)
 }
 
@@ -18704,7 +18704,7 @@ pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m
 #[target_feature(enable = "avx512f")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompresspd))]
-pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m512d) {
+pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m512d) {
     vcompressstorepd(base_addr as *mut _, a.as_f64x8(), k)
 }
 
@@ -18715,7 +18715,7 @@ pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompresspd))]
-pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m256d) {
+pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m256d) {
     vcompressstorepd256(base_addr as *mut _, a.as_f64x4(), k)
 }
 
@@ -18726,7 +18726,7 @@ pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
 #[target_feature(enable = "avx512f,avx512vl")]
 #[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
 #[cfg_attr(test, assert_instr(vcompresspd))]
-pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m128d) {
+pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m128d) {
     vcompressstorepd128(base_addr as *mut _, a.as_f64x2(), k)
 }
 
@@ -56718,59 +56718,59 @@ mod tests {
     unsafe fn test_mm512_mask_compressstoreu_epi32() {
         let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
         let mut r = [0_i32; 16];
-        _mm512_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
+        _mm512_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i32; 16]);
-        _mm512_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b1111000011001010, a);
+        _mm512_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b1111000011001010, a);
         assert_eq!(&r, &[2, 4, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm256_mask_compressstoreu_epi32() {
         let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
         let mut r = [0_i32; 8];
-        _mm256_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
+        _mm256_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i32; 8]);
-        _mm256_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b11001010, a);
+        _mm256_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b11001010, a);
         assert_eq!(&r, &[2, 4, 7, 8, 0, 0, 0, 0]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm_mask_compressstoreu_epi32() {
         let a = _mm_setr_epi32(1, 2, 3, 4);
         let mut r = [0_i32; 4];
-        _mm_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
+        _mm_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i32; 4]);
-        _mm_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b1011, a);
+        _mm_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b1011, a);
         assert_eq!(&r, &[1, 2, 4, 0]);
     }
 
     #[simd_test(enable = "avx512f")]
     unsafe fn test_mm512_mask_compressstoreu_epi64() {
         let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8);
         let mut r = [0_i64; 8];
-        _mm512_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
+        _mm512_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i64; 8]);
-        _mm512_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b11001010, a);
+        _mm512_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b11001010, a);
         assert_eq!(&r, &[2, 4, 7, 8, 0, 0, 0, 0]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm256_mask_compressstoreu_epi64() {
         let a = _mm256_setr_epi64x(1, 2, 3, 4);
         let mut r = [0_i64; 4];
-        _mm256_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
+        _mm256_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i64; 4]);
-        _mm256_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b1011, a);
+        _mm256_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b1011, a);
         assert_eq!(&r, &[1, 2, 4, 0]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm_mask_compressstoreu_epi64() {
         let a = _mm_setr_epi64x(1, 2);
         let mut r = [0_i64; 2];
-        _mm_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
+        _mm_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_i64; 2]);
-        _mm_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b10, a);
+        _mm_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b10, a);
         assert_eq!(&r, &[2, 0]);
     }
 
@@ -56781,9 +56781,9 @@ mod tests {
             13_f32, 14_f32, 15_f32, 16_f32,
         );
         let mut r = [0_f32; 16];
-        _mm512_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
+        _mm512_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_f32; 16]);
-        _mm512_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b1111000011001010, a);
+        _mm512_mask_compressstoreu_ps(r.as_mut_ptr(), 0b1111000011001010, a);
         assert_eq!(
             &r,
             &[
@@ -56797,9 +56797,9 @@ mod tests {
     unsafe fn test_mm256_mask_compressstoreu_ps() {
         let a = _mm256_setr_ps(1_f32, 2_f32, 3_f32, 4_f32, 5_f32, 6_f32, 7_f32, 8_f32);
         let mut r = [0_f32; 8];
-        _mm256_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
+        _mm256_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0_f32; 8]);
-        _mm256_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b11001010, a);
+        _mm256_mask_compressstoreu_ps(r.as_mut_ptr(), 0b11001010, a);
         assert_eq!(
             &r,
             &[2_f32, 4_f32, 7_f32, 8_f32, 0_f32, 0_f32, 0_f32, 0_f32]
@@ -56810,39 +56810,39 @@ mod tests {
     unsafe fn test_mm_mask_compressstoreu_ps() {
         let a = _mm_setr_ps(1_f32, 2_f32, 3_f32, 4_f32);
         let mut r = [0.; 4];
-        _mm_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
+        _mm_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0.; 4]);
-        _mm_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b1011, a);
+        _mm_mask_compressstoreu_ps(r.as_mut_ptr(), 0b1011, a);
         assert_eq!(&r, &[1_f32, 2_f32, 4_f32, 0_f32]);
     }
 
     #[simd_test(enable = "avx512f")]
     unsafe fn test_mm512_mask_compressstoreu_pd() {
         let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.);
         let mut r = [0.; 8];
-        _mm512_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
+        _mm512_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0.; 8]);
-        _mm512_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b11001010, a);
+        _mm512_mask_compressstoreu_pd(r.as_mut_ptr(), 0b11001010, a);
         assert_eq!(&r, &[2., 4., 7., 8., 0., 0., 0., 0.]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm256_mask_compressstoreu_pd() {
         let a = _mm256_setr_pd(1., 2., 3., 4.);
         let mut r = [0.; 4];
-        _mm256_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
+        _mm256_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0.; 4]);
-        _mm256_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b1011, a);
+        _mm256_mask_compressstoreu_pd(r.as_mut_ptr(), 0b1011, a);
         assert_eq!(&r, &[1., 2., 4., 0.]);
     }
 
     #[simd_test(enable = "avx512f,avx512vl")]
     unsafe fn test_mm_mask_compressstoreu_pd() {
         let a = _mm_setr_pd(1., 2.);
         let mut r = [0.; 2];
-        _mm_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
+        _mm_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
         assert_eq!(&r, &[0.; 2]);
-        _mm_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b10, a);
+        _mm_mask_compressstoreu_pd(r.as_mut_ptr(), 0b10, a);
         assert_eq!(&r, &[2., 0.]);
     }
 
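Usage note: with the typed signatures introduced above, callers pass an element pointer directly and no longer need the `as *mut _` cast seen in the old tests. Below is a minimal sketch of calling the changed API, assuming a nightly toolchain with the unstable `stdarch_x86_avx512` feature and an AVX-512F capable CPU; the helper name `compress_selected` is illustrative, not part of the diff.

// Hypothetical usage sketch of the new typed-pointer signature.
#![feature(stdarch_x86_avx512)]

#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx512f")]
unsafe fn compress_selected(a: __m512i, mask: __mmask16, out: &mut [i32; 16]) {
    // The destination is already `*mut i32`, so no cast is needed at the
    // call site. Elements of `a` whose mask bit is set are stored
    // contiguously (unaligned) starting at `out[0]`.
    _mm512_mask_compressstoreu_epi32(out.as_mut_ptr(), mask, a);
}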