Skip to content

Commit 41cde2b

Browse files
committed
Change void* type for compressstore intrinsics
1 parent cef1ecb commit 41cde2b

File tree

2 files changed: +57 −65 lines changed

crates/core_arch/src/x86/avx512f.rs

Lines changed: 36 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -18605,7 +18605,7 @@ pub fn _mm_maskz_compress_pd(k: __mmask8, a: __m128d) -> __m128d {
1860518605
#[target_feature(enable = "avx512f")]
1860618606
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1860718607
#[cfg_attr(test, assert_instr(vpcompressd))]
18608-
pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16, a: __m512i) {
18608+
pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask16, a: __m512i) {
1860918609
vcompressstored(base_addr as *mut _, a.as_i32x16(), k)
1861018610
}
1861118611

@@ -18616,7 +18616,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask16,
1861618616
#[target_feature(enable = "avx512f,avx512vl")]
1861718617
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1861818618
#[cfg_attr(test, assert_instr(vpcompressd))]
18619-
pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m256i) {
18619+
pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask8, a: __m256i) {
1862018620
vcompressstored256(base_addr as *mut _, a.as_i32x8(), k)
1862118621
}
1862218622

@@ -18627,7 +18627,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8,
1862718627
#[target_feature(enable = "avx512f,avx512vl")]
1862818628
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1862918629
#[cfg_attr(test, assert_instr(vpcompressd))]
18630-
pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a: __m128i) {
18630+
pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut i32, k: __mmask8, a: __m128i) {
1863118631
vcompressstored128(base_addr as *mut _, a.as_i32x4(), k)
1863218632
}
1863318633

@@ -18638,7 +18638,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi32(base_addr: *mut u8, k: __mmask8, a:
1863818638
#[target_feature(enable = "avx512f")]
1863918639
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1864018640
#[cfg_attr(test, assert_instr(vpcompressq))]
18641-
pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m512i) {
18641+
pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m512i) {
1864218642
vcompressstoreq(base_addr as *mut _, a.as_i64x8(), k)
1864318643
}
1864418644

@@ -18649,7 +18649,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
1864918649
#[target_feature(enable = "avx512f,avx512vl")]
1865018650
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1865118651
#[cfg_attr(test, assert_instr(vpcompressq))]
18652-
pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m256i) {
18652+
pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m256i) {
1865318653
vcompressstoreq256(base_addr as *mut _, a.as_i64x4(), k)
1865418654
}
1865518655

@@ -18660,7 +18660,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8,
1866018660
#[target_feature(enable = "avx512f,avx512vl")]
1866118661
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1866218662
#[cfg_attr(test, assert_instr(vpcompressq))]
18663-
pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a: __m128i) {
18663+
pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut i64, k: __mmask8, a: __m128i) {
1866418664
vcompressstoreq128(base_addr as *mut _, a.as_i64x2(), k)
1866518665
}
1866618666

@@ -18671,7 +18671,7 @@ pub unsafe fn _mm_mask_compressstoreu_epi64(base_addr: *mut u8, k: __mmask8, a:
1867118671
#[target_feature(enable = "avx512f")]
1867218672
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1867318673
#[cfg_attr(test, assert_instr(vcompressps))]
18674-
pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a: __m512) {
18674+
pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask16, a: __m512) {
1867518675
vcompressstoreps(base_addr as *mut _, a.as_f32x16(), k)
1867618676
}
1867718677

@@ -18682,7 +18682,7 @@ pub unsafe fn _mm512_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask16, a:
1868218682
#[target_feature(enable = "avx512f,avx512vl")]
1868318683
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1868418684
#[cfg_attr(test, assert_instr(vcompressps))]
18685-
pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m256) {
18685+
pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask8, a: __m256) {
1868618686
vcompressstoreps256(base_addr as *mut _, a.as_f32x8(), k)
1868718687
}
1868818688

@@ -18693,7 +18693,7 @@ pub unsafe fn _mm256_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a:
1869318693
#[target_feature(enable = "avx512f,avx512vl")]
1869418694
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1869518695
#[cfg_attr(test, assert_instr(vcompressps))]
18696-
pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m128) {
18696+
pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut f32, k: __mmask8, a: __m128) {
1869718697
vcompressstoreps128(base_addr as *mut _, a.as_f32x4(), k)
1869818698
}
1869918699

@@ -18704,7 +18704,7 @@ pub unsafe fn _mm_mask_compressstoreu_ps(base_addr: *mut u8, k: __mmask8, a: __m
1870418704
#[target_feature(enable = "avx512f")]
1870518705
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1870618706
#[cfg_attr(test, assert_instr(vcompresspd))]
18707-
pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m512d) {
18707+
pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m512d) {
1870818708
vcompressstorepd(base_addr as *mut _, a.as_f64x8(), k)
1870918709
}
1871018710

@@ -18715,7 +18715,7 @@ pub unsafe fn _mm512_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
1871518715
#[target_feature(enable = "avx512f,avx512vl")]
1871618716
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1871718717
#[cfg_attr(test, assert_instr(vcompresspd))]
18718-
pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m256d) {
18718+
pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m256d) {
1871918719
vcompressstorepd256(base_addr as *mut _, a.as_f64x4(), k)
1872018720
}
1872118721

@@ -18726,7 +18726,7 @@ pub unsafe fn _mm256_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a:
1872618726
#[target_feature(enable = "avx512f,avx512vl")]
1872718727
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
1872818728
#[cfg_attr(test, assert_instr(vcompresspd))]
18729-
pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut u8, k: __mmask8, a: __m128d) {
18729+
pub unsafe fn _mm_mask_compressstoreu_pd(base_addr: *mut f64, k: __mmask8, a: __m128d) {
1873018730
vcompressstorepd128(base_addr as *mut _, a.as_f64x2(), k)
1873118731
}
1873218732

@@ -56718,59 +56718,59 @@ mod tests {
5671856718
unsafe fn test_mm512_mask_compressstoreu_epi32() {
5671956719
let a = _mm512_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
5672056720
let mut r = [0_i32; 16];
56721-
_mm512_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
56721+
_mm512_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
5672256722
assert_eq!(&r, &[0_i32; 16]);
56723-
_mm512_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b1111000011001010, a);
56723+
_mm512_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b1111000011001010, a);
5672456724
assert_eq!(&r, &[2, 4, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0]);
5672556725
}
5672656726

5672756727
#[simd_test(enable = "avx512f,avx512vl")]
5672856728
unsafe fn test_mm256_mask_compressstoreu_epi32() {
5672956729
let a = _mm256_setr_epi32(1, 2, 3, 4, 5, 6, 7, 8);
5673056730
let mut r = [0_i32; 8];
56731-
_mm256_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
56731+
_mm256_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
5673256732
assert_eq!(&r, &[0_i32; 8]);
56733-
_mm256_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b11001010, a);
56733+
_mm256_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b11001010, a);
5673456734
assert_eq!(&r, &[2, 4, 7, 8, 0, 0, 0, 0]);
5673556735
}
5673656736

5673756737
#[simd_test(enable = "avx512f,avx512vl")]
5673856738
unsafe fn test_mm_mask_compressstoreu_epi32() {
5673956739
let a = _mm_setr_epi32(1, 2, 3, 4);
5674056740
let mut r = [0_i32; 4];
56741-
_mm_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0, a);
56741+
_mm_mask_compressstoreu_epi32(r.as_mut_ptr(), 0, a);
5674256742
assert_eq!(&r, &[0_i32; 4]);
56743-
_mm_mask_compressstoreu_epi32(r.as_mut_ptr() as *mut _, 0b1011, a);
56743+
_mm_mask_compressstoreu_epi32(r.as_mut_ptr(), 0b1011, a);
5674456744
assert_eq!(&r, &[1, 2, 4, 0]);
5674556745
}
5674656746

5674756747
#[simd_test(enable = "avx512f")]
5674856748
unsafe fn test_mm512_mask_compressstoreu_epi64() {
5674956749
let a = _mm512_setr_epi64(1, 2, 3, 4, 5, 6, 7, 8);
5675056750
let mut r = [0_i64; 8];
56751-
_mm512_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
56751+
_mm512_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
5675256752
assert_eq!(&r, &[0_i64; 8]);
56753-
_mm512_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b11001010, a);
56753+
_mm512_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b11001010, a);
5675456754
assert_eq!(&r, &[2, 4, 7, 8, 0, 0, 0, 0]);
5675556755
}
5675656756

5675756757
#[simd_test(enable = "avx512f,avx512vl")]
5675856758
unsafe fn test_mm256_mask_compressstoreu_epi64() {
5675956759
let a = _mm256_setr_epi64x(1, 2, 3, 4);
5676056760
let mut r = [0_i64; 4];
56761-
_mm256_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
56761+
_mm256_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
5676256762
assert_eq!(&r, &[0_i64; 4]);
56763-
_mm256_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b1011, a);
56763+
_mm256_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b1011, a);
5676456764
assert_eq!(&r, &[1, 2, 4, 0]);
5676556765
}
5676656766

5676756767
#[simd_test(enable = "avx512f,avx512vl")]
5676856768
unsafe fn test_mm_mask_compressstoreu_epi64() {
5676956769
let a = _mm_setr_epi64x(1, 2);
5677056770
let mut r = [0_i64; 2];
56771-
_mm_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0, a);
56771+
_mm_mask_compressstoreu_epi64(r.as_mut_ptr(), 0, a);
5677256772
assert_eq!(&r, &[0_i64; 2]);
56773-
_mm_mask_compressstoreu_epi64(r.as_mut_ptr() as *mut _, 0b10, a);
56773+
_mm_mask_compressstoreu_epi64(r.as_mut_ptr(), 0b10, a);
5677456774
assert_eq!(&r, &[2, 0]);
5677556775
}
5677656776

@@ -56781,9 +56781,9 @@ mod tests {
5678156781
13_f32, 14_f32, 15_f32, 16_f32,
5678256782
);
5678356783
let mut r = [0_f32; 16];
56784-
_mm512_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
56784+
_mm512_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
5678556785
assert_eq!(&r, &[0_f32; 16]);
56786-
_mm512_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b1111000011001010, a);
56786+
_mm512_mask_compressstoreu_ps(r.as_mut_ptr(), 0b1111000011001010, a);
5678756787
assert_eq!(
5678856788
&r,
5678956789
&[
@@ -56797,9 +56797,9 @@ mod tests {
5679756797
unsafe fn test_mm256_mask_compressstoreu_ps() {
5679856798
let a = _mm256_setr_ps(1_f32, 2_f32, 3_f32, 4_f32, 5_f32, 6_f32, 7_f32, 8_f32);
5679956799
let mut r = [0_f32; 8];
56800-
_mm256_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
56800+
_mm256_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
5680156801
assert_eq!(&r, &[0_f32; 8]);
56802-
_mm256_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b11001010, a);
56802+
_mm256_mask_compressstoreu_ps(r.as_mut_ptr(), 0b11001010, a);
5680356803
assert_eq!(
5680456804
&r,
5680556805
&[2_f32, 4_f32, 7_f32, 8_f32, 0_f32, 0_f32, 0_f32, 0_f32]
@@ -56810,39 +56810,39 @@ mod tests {
5681056810
unsafe fn test_mm_mask_compressstoreu_ps() {
5681156811
let a = _mm_setr_ps(1_f32, 2_f32, 3_f32, 4_f32);
5681256812
let mut r = [0.; 4];
56813-
_mm_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0, a);
56813+
_mm_mask_compressstoreu_ps(r.as_mut_ptr(), 0, a);
5681456814
assert_eq!(&r, &[0.; 4]);
56815-
_mm_mask_compressstoreu_ps(r.as_mut_ptr() as *mut _, 0b1011, a);
56815+
_mm_mask_compressstoreu_ps(r.as_mut_ptr(), 0b1011, a);
5681656816
assert_eq!(&r, &[1_f32, 2_f32, 4_f32, 0_f32]);
5681756817
}
5681856818

5681956819
#[simd_test(enable = "avx512f")]
5682056820
unsafe fn test_mm512_mask_compressstoreu_pd() {
5682156821
let a = _mm512_setr_pd(1., 2., 3., 4., 5., 6., 7., 8.);
5682256822
let mut r = [0.; 8];
56823-
_mm512_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
56823+
_mm512_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
5682456824
assert_eq!(&r, &[0.; 8]);
56825-
_mm512_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b11001010, a);
56825+
_mm512_mask_compressstoreu_pd(r.as_mut_ptr(), 0b11001010, a);
5682656826
assert_eq!(&r, &[2., 4., 7., 8., 0., 0., 0., 0.]);
5682756827
}
5682856828

5682956829
#[simd_test(enable = "avx512f,avx512vl")]
5683056830
unsafe fn test_mm256_mask_compressstoreu_pd() {
5683156831
let a = _mm256_setr_pd(1., 2., 3., 4.);
5683256832
let mut r = [0.; 4];
56833-
_mm256_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
56833+
_mm256_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
5683456834
assert_eq!(&r, &[0.; 4]);
56835-
_mm256_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b1011, a);
56835+
_mm256_mask_compressstoreu_pd(r.as_mut_ptr(), 0b1011, a);
5683656836
assert_eq!(&r, &[1., 2., 4., 0.]);
5683756837
}
5683856838

5683956839
#[simd_test(enable = "avx512f,avx512vl")]
5684056840
unsafe fn test_mm_mask_compressstoreu_pd() {
5684156841
let a = _mm_setr_pd(1., 2.);
5684256842
let mut r = [0.; 2];
56843-
_mm_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0, a);
56843+
_mm_mask_compressstoreu_pd(r.as_mut_ptr(), 0, a);
5684456844
assert_eq!(&r, &[0.; 2]);
56845-
_mm_mask_compressstoreu_pd(r.as_mut_ptr() as *mut _, 0b10, a);
56845+
_mm_mask_compressstoreu_pd(r.as_mut_ptr(), 0b10, a);
5684656846
assert_eq!(&r, &[2., 0.]);
5684756847
}
5684856848

crates/core_arch/src/x86/avx512vbmi2.rs

Lines changed: 21 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ pub unsafe fn _mm_maskz_expandloadu_epi8(k: __mmask16, mem_addr: *const i8) -> _
169169
#[target_feature(enable = "avx512vbmi2")]
170170
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
171171
#[cfg_attr(test, assert_instr(vpcompressw))]
172-
pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask32, a: __m512i) {
172+
pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut i16, k: __mmask32, a: __m512i) {
173173
vcompressstorew(base_addr as *mut _, a.as_i16x32(), k)
174174
}
175175

@@ -180,7 +180,7 @@ pub unsafe fn _mm512_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask32,
180180
#[target_feature(enable = "avx512vbmi2,avx512vl")]
181181
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
182182
#[cfg_attr(test, assert_instr(vpcompressw))]
183-
pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask16, a: __m256i) {
183+
pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut i16, k: __mmask16, a: __m256i) {
184184
vcompressstorew256(base_addr as *mut _, a.as_i16x16(), k)
185185
}
186186

@@ -191,7 +191,7 @@ pub unsafe fn _mm256_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask16,
191191
#[target_feature(enable = "avx512vbmi2,avx512vl")]
192192
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
193193
#[cfg_attr(test, assert_instr(vpcompressw))]
194-
pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask8, a: __m128i) {
194+
pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut i16, k: __mmask8, a: __m128i) {
195195
vcompressstorew128(base_addr as *mut _, a.as_i16x8(), k)
196196
}
197197

@@ -202,8 +202,8 @@ pub unsafe fn _mm_mask_compressstoreu_epi16(base_addr: *mut u8, k: __mmask8, a:
202202
#[target_feature(enable = "avx512vbmi2")]
203203
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
204204
#[cfg_attr(test, assert_instr(vpcompressb))]
205-
pub unsafe fn _mm512_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask64, a: __m512i) {
206-
vcompressstoreb(base_addr as *mut _, a.as_i8x64(), k)
205+
pub unsafe fn _mm512_mask_compressstoreu_epi8(base_addr: *mut i8, k: __mmask64, a: __m512i) {
206+
vcompressstoreb(base_addr, a.as_i8x64(), k)
207207
}
208208

209209
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -213,8 +213,8 @@ pub unsafe fn _mm512_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask64,
213213
#[target_feature(enable = "avx512vbmi2,avx512vl")]
214214
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
215215
#[cfg_attr(test, assert_instr(vpcompressb))]
216-
pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask32, a: __m256i) {
217-
vcompressstoreb256(base_addr as *mut _, a.as_i8x32(), k)
216+
pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut i8, k: __mmask32, a: __m256i) {
217+
vcompressstoreb256(base_addr, a.as_i8x32(), k)
218218
}
219219

220220
/// Contiguously store the active 8-bit integers in a (those with their respective bit set in writemask k) to unaligned memory at base_addr.
@@ -224,8 +224,8 @@ pub unsafe fn _mm256_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask32,
224224
#[target_feature(enable = "avx512vbmi2,avx512vl")]
225225
#[unstable(feature = "stdarch_x86_avx512", issue = "111137")]
226226
#[cfg_attr(test, assert_instr(vpcompressb))]
227-
pub unsafe fn _mm_mask_compressstoreu_epi8(base_addr: *mut u8, k: __mmask16, a: __m128i) {
228-
vcompressstoreb128(base_addr as *mut _, a.as_i8x16(), k)
227+
pub unsafe fn _mm_mask_compressstoreu_epi8(base_addr: *mut i8, k: __mmask16, a: __m128i) {
228+
vcompressstoreb128(base_addr, a.as_i8x16(), k)
229229
}
230230

231231
/// Contiguously store the active 16-bit integers in a (those with their respective bit set in writemask k) to dst, and pass through the remaining elements from src.
@@ -3853,13 +3853,9 @@ mod tests {
38533853
10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
38543854
);
38553855
let mut r = [0_i16; 32];
3856-
_mm512_mask_compressstoreu_epi16(r.as_mut_ptr() as *mut _, 0, a);
3856+
_mm512_mask_compressstoreu_epi16(r.as_mut_ptr(), 0, a);
38573857
assert_eq!(&r, &[0_i16; 32]);
3858-
_mm512_mask_compressstoreu_epi16(
3859-
r.as_mut_ptr() as *mut _,
3860-
0b11110000_11001010_11111111_00000000,
3861-
a,
3862-
);
3858+
_mm512_mask_compressstoreu_epi16(r.as_mut_ptr(), 0b11110000_11001010_11111111_00000000, a);
38633859
assert_eq!(
38643860
&r,
38653861
&[
@@ -3873,19 +3869,19 @@ mod tests {
38733869
unsafe fn test_mm256_mask_compressstoreu_epi16() {
38743870
let a = _mm256_set_epi16(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
38753871
let mut r = [0_i16; 16];
3876-
_mm256_mask_compressstoreu_epi16(r.as_mut_ptr() as *mut _, 0, a);
3872+
_mm256_mask_compressstoreu_epi16(r.as_mut_ptr(), 0, a);
38773873
assert_eq!(&r, &[0_i16; 16]);
3878-
_mm256_mask_compressstoreu_epi16(r.as_mut_ptr() as *mut _, 0b11110000_11001010, a);
3874+
_mm256_mask_compressstoreu_epi16(r.as_mut_ptr(), 0b11110000_11001010, a);
38793875
assert_eq!(&r, &[2, 4, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0]);
38803876
}
38813877

38823878
#[simd_test(enable = "avx512vbmi2,avx512vl")]
38833879
unsafe fn test_mm_mask_compressstoreu_epi16() {
38843880
let a = _mm_set_epi16(8, 7, 6, 5, 4, 3, 2, 1);
38853881
let mut r = [0_i16; 8];
3886-
_mm_mask_compressstoreu_epi16(r.as_mut_ptr() as *mut _, 0, a);
3882+
_mm_mask_compressstoreu_epi16(r.as_mut_ptr(), 0, a);
38873883
assert_eq!(&r, &[0_i16; 8]);
3888-
_mm_mask_compressstoreu_epi16(r.as_mut_ptr() as *mut _, 0b11110000, a);
3884+
_mm_mask_compressstoreu_epi16(r.as_mut_ptr(), 0b11110000, a);
38893885
assert_eq!(&r, &[5, 6, 7, 8, 0, 0, 0, 0]);
38903886
}
38913887

@@ -3897,10 +3893,10 @@ mod tests {
38973893
20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
38983894
);
38993895
let mut r = [0_i8; 64];
3900-
_mm512_mask_compressstoreu_epi8(r.as_mut_ptr() as *mut _, 0, a);
3896+
_mm512_mask_compressstoreu_epi8(r.as_mut_ptr(), 0, a);
39013897
assert_eq!(&r, &[0_i8; 64]);
39023898
_mm512_mask_compressstoreu_epi8(
3903-
r.as_mut_ptr() as *mut _,
3899+
r.as_mut_ptr(),
39043900
0b11110000_11001010_11111111_00000000_10101010_01010101_11110000_00001111,
39053901
a,
39063902
);
@@ -3921,13 +3917,9 @@ mod tests {
39213917
10, 9, 8, 7, 6, 5, 4, 3, 2, 1,
39223918
);
39233919
let mut r = [0_i8; 32];
3924-
_mm256_mask_compressstoreu_epi8(r.as_mut_ptr() as *mut _, 0, a);
3920+
_mm256_mask_compressstoreu_epi8(r.as_mut_ptr(), 0, a);
39253921
assert_eq!(&r, &[0_i8; 32]);
3926-
_mm256_mask_compressstoreu_epi8(
3927-
r.as_mut_ptr() as *mut _,
3928-
0b11110000_11001010_11111111_00000000,
3929-
a,
3930-
);
3922+
_mm256_mask_compressstoreu_epi8(r.as_mut_ptr(), 0b11110000_11001010_11111111_00000000, a);
39313923
assert_eq!(
39323924
&r,
39333925
&[
@@ -3941,9 +3933,9 @@ mod tests {
39413933
unsafe fn test_mm_mask_compressstoreu_epi8() {
39423934
let a = _mm_set_epi8(16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1);
39433935
let mut r = [0_i8; 16];
3944-
_mm_mask_compressstoreu_epi8(r.as_mut_ptr() as *mut _, 0, a);
3936+
_mm_mask_compressstoreu_epi8(r.as_mut_ptr(), 0, a);
39453937
assert_eq!(&r, &[0_i8; 16]);
3946-
_mm_mask_compressstoreu_epi8(r.as_mut_ptr() as *mut _, 0b11110000_11001010, a);
3938+
_mm_mask_compressstoreu_epi8(r.as_mut_ptr(), 0b11110000_11001010, a);
39473939
assert_eq!(&r, &[2, 4, 7, 8, 13, 14, 15, 16, 0, 0, 0, 0, 0, 0, 0, 0]);
39483940
}
39493941
}

0 commit comments

Comments (0)