@@ -153,28 +153,28 @@ pub unsafe fn _mm256_adds_epu16(a: __m256i, b: __m256i) -> __m256i {
 /// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_alignr_epi8)
 #[inline]
 #[target_feature(enable = "avx2")]
-#[cfg_attr(test, assert_instr(vpalignr, n = 7))]
-#[rustc_args_required_const(2)]
+#[cfg_attr(test, assert_instr(vpalignr, IMM8 = 7))]
+#[rustc_legacy_const_generics(2)]
 #[stable(feature = "simd_x86", since = "1.27.0")]
-pub unsafe fn _mm256_alignr_epi8(a: __m256i, b: __m256i, n: i32) -> __m256i {
-    let n = n as u32;
-    // If `palignr` is shifting the pair of vectors more than the size of two
+pub unsafe fn _mm256_alignr_epi8<const IMM8: i32>(a: __m256i, b: __m256i) -> __m256i {
+    static_assert_imm8!(IMM8);
+    // If palignr is shifting the pair of vectors more than the size of two
     // lanes, emit zero.
-    if n > 32 {
+    if IMM8 > 32 {
         return _mm256_set1_epi8(0);
     }
-    // If `palignr` is shifting the pair of input vectors more than one lane,
+    // If palignr is shifting the pair of input vectors more than one lane,
     // but less than two lanes, convert to shifting in zeroes.
-    let (a, b, n) = if n > 16 {
-        (_mm256_set1_epi8(0), a, n - 16)
+    let (a, b) = if IMM8 > 16 {
+        (_mm256_set1_epi8(0), a)
     } else {
-        (a, b, n)
+        (a, b)
     };

     let a = a.as_i8x32();
     let b = b.as_i8x32();

-    let r: i8x32 = match n {
+    let r: i8x32 = match IMM8 % 16 {
         0 => simd_shuffle32(
             b,
             a,
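A minimal caller-side sketch (not part of this patch; the helper name `alignr_demo` is hypothetical): after this change the shift amount is a const generic, so it must be a compile-time constant, while `#[rustc_legacy_const_generics(2)]` keeps the old positional call `_mm256_alignr_epi8(a, b, 7)` compiling by rewriting it into the const-generic form.

#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::*;

// Hypothetical demo, assuming the AVX2 target feature is available.
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn alignr_demo(a: __m256i, b: __m256i) -> __m256i {
    // Concatenates each 128-bit lane pair of `a` and `b` and shifts the
    // result right by 7 bytes, now passed as a const generic.
    _mm256_alignr_epi8::<7>(a, b)
}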
@@ -5106,10 +5106,10 @@ mod tests {
             -17, -18, -19, -20, -21, -22, -23, -24,
             -25, -26, -27, -28, -29, -30, -31, -32,
         );
-        let r = _mm256_alignr_epi8(a, b, 33);
+        let r = _mm256_alignr_epi8::<33>(a, b);
         assert_eq_m256i(r, _mm256_set1_epi8(0));

-        let r = _mm256_alignr_epi8(a, b, 17);
+        let r = _mm256_alignr_epi8::<17>(a, b);
         #[rustfmt::skip]
         let expected = _mm256_setr_epi8(
             2, 3, 4, 5, 6, 7, 8, 9,
@@ -5119,7 +5119,7 @@ mod tests {
         );
         assert_eq_m256i(r, expected);

-        let r = _mm256_alignr_epi8(a, b, 4);
+        let r = _mm256_alignr_epi8::<4>(a, b);
         #[rustfmt::skip]
         let expected = _mm256_setr_epi8(
             -5, -6, -7, -8, -9, -10, -11, -12,
@@ -5136,10 +5136,10 @@ mod tests {
             -18, -19, -20, -21, -22, -23, -24, -25,
             -26, -27, -28, -29, -30, -31, -32,
         );
-        let r = _mm256_alignr_epi8(a, b, 16);
+        let r = _mm256_alignr_epi8::<16>(a, b);
         assert_eq_m256i(r, expected);

-        let r = _mm256_alignr_epi8(a, b, 15);
+        let r = _mm256_alignr_epi8::<15>(a, b);
         #[rustfmt::skip]
         let expected = _mm256_setr_epi8(
             -16, 1, 2, 3, 4, 5, 6, 7,
@@ -5149,7 +5149,7 @@ mod tests {
         );
         assert_eq_m256i(r, expected);

-        let r = _mm256_alignr_epi8(a, b, 0);
+        let r = _mm256_alignr_epi8::<0>(a, b);
         assert_eq_m256i(r, b);
     }