Skip to content
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.

Commit b2763cc

Browse files
committed Mar 13, 2022
Auto merge of rust-lang#94899 - workingjubilee:bump-simd-clamp, r=workingjubilee
Bump portable-simd to shadow Ord. Your usual bump. Summary for reference: - We are moving away from the subjective "directional" nomenclature, so `horizontal_*` becomes `reduce_*`, et cetera. - In addition, `Simd<Int, N>` now has methods which shadow Ord's methods directly, making those methods behave like the already "overloaded" float methods do.
2 parents 4800c78 + 2b1f249 commit b2763cc

File tree

20 files changed

+214
-105
lines changed

20 files changed

+214
-105
lines changed
 

‎library/core/src/slice/mod.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -3536,7 +3536,7 @@ impl<T> [T] {
35363536
/// suffix.iter().copied().sum(),
35373537
/// ]);
35383538
/// let sums = middle.iter().copied().fold(sums, f32x4::add);
3539-
/// sums.horizontal_sum()
3539+
/// sums.reduce_sum()
35403540
/// }
35413541
///
35423542
/// let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();

‎library/portable-simd/beginners-guide.md

+2-2
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ SIMD has a few special vocabulary terms you should know:
3333

3434
* **Vertical:** When an operation is "vertical", each lane processes individually without regard to the other lanes in the same vector. For example, a "vertical add" between two vectors would add lane 0 in `a` with lane 0 in `b`, with the total in lane 0 of `out`, and then the same thing for lanes 1, 2, etc. Most SIMD operations are vertical operations, so if your problem is a vertical problem then you can probably solve it with SIMD.
3535

36-
* **Horizontal:** When an operation is "horizontal", the lanes within a single vector interact in some way. A "horizontal add" might add up lane 0 of `a` with lane 1 of `a`, with the total in lane 0 of `out`.
36+
* **Reducing/Reduce:** When an operation is "reducing" (functions named `reduce_*`), the lanes within a single vector are merged using some operation such as addition, returning the merged value as a scalar. For instance, a reducing add would return the sum of all the lanes' values.
3737

3838
* **Target Feature:** Rust calls a CPU architecture extension a `target_feature`. Proper SIMD requires various CPU extensions to be enabled (details below). Don't confuse this with `feature`, which is a Cargo crate concept.
3939

@@ -83,4 +83,4 @@ Fortunately, most SIMD types have a fairly predictable size. `i32x4` is bit-equi
8383
However, this is not the same as alignment. Computer architectures generally prefer aligned accesses, especially when moving data between memory and vector registers, and while some support specialized operations that can bend the rules to help with this, unaligned access is still typically slow, or even undefined behavior. In addition, different architectures can require different alignments when interacting with their native SIMD types. For this reason, any `#[repr(simd)]` type has a non-portable alignment. If it is necessary to directly interact with the alignment of these types, it should be via [`mem::align_of`].
8484

8585
[`mem::transmute`]: https://doc.rust-lang.org/core/mem/fn.transmute.html
86-
[`mem::align_of`]: https://doc.rust-lang.org/core/mem/fn.align_of.html
86+
[`mem::align_of`]: https://doc.rust-lang.org/core/mem/fn.align_of.html

‎library/portable-simd/crates/core_simd/Cargo.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ categories = ["hardware-support", "no-std"]
99
license = "MIT OR Apache-2.0"
1010

1111
[features]
12-
default = ["std", "generic_const_exprs"]
12+
default = []
1313
std = []
1414
generic_const_exprs = []
1515

‎library/portable-simd/crates/core_simd/examples/matrix_inversion.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -233,7 +233,7 @@ pub fn simd_inv4x4(m: Matrix4x4) -> Option<Matrix4x4> {
233233
let det = det.rotate_lanes_right::<2>() + det;
234234
let det = det.reverse().rotate_lanes_right::<2>() + det;
235235

236-
if det.horizontal_sum() == 0. {
236+
if det.reduce_sum() == 0. {
237237
return None;
238238
}
239239
// calculate the reciprocal

‎library/portable-simd/crates/core_simd/examples/nbody.rs

+4-4
Original file line numberDiff line numberDiff line change
@@ -107,10 +107,10 @@ mod nbody {
107107
let mut e = 0.;
108108
for i in 0..N_BODIES {
109109
let bi = &bodies[i];
110-
e += bi.mass * (bi.v * bi.v).horizontal_sum() * 0.5;
110+
e += bi.mass * (bi.v * bi.v).reduce_sum() * 0.5;
111111
for bj in bodies.iter().take(N_BODIES).skip(i + 1) {
112112
let dx = bi.x - bj.x;
113-
e -= bi.mass * bj.mass / (dx * dx).horizontal_sum().sqrt()
113+
e -= bi.mass * bj.mass / (dx * dx).reduce_sum().sqrt()
114114
}
115115
}
116116
e
@@ -134,8 +134,8 @@ mod nbody {
134134
let mut mag = [0.0; N];
135135
for i in (0..N).step_by(2) {
136136
let d2s = f64x2::from_array([
137-
(r[i] * r[i]).horizontal_sum(),
138-
(r[i + 1] * r[i + 1]).horizontal_sum(),
137+
(r[i] * r[i]).reduce_sum(),
138+
(r[i + 1] * r[i + 1]).reduce_sum(),
139139
]);
140140
let dmags = f64x2::splat(dt) / (d2s * d2s.sqrt());
141141
mag[i] = dmags[0];

‎library/portable-simd/crates/core_simd/examples/spectral_norm.rs

+2-2
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ fn mult_av(v: &[f64], out: &mut [f64]) {
2020
sum += b / a;
2121
j += 2
2222
}
23-
*out = sum.horizontal_sum();
23+
*out = sum.reduce_sum();
2424
}
2525
}
2626

@@ -38,7 +38,7 @@ fn mult_atv(v: &[f64], out: &mut [f64]) {
3838
sum += b / a;
3939
j += 2
4040
}
41-
*out = sum.horizontal_sum();
41+
*out = sum.reduce_sum();
4242
}
4343
}
4444

‎library/portable-simd/crates/core_simd/src/comparisons.rs

+52
Original file line numberDiff line numberDiff line change
@@ -66,3 +66,55 @@ where
6666
unsafe { Mask::from_int_unchecked(intrinsics::simd_ge(self, other)) }
6767
}
6868
}
69+
70+
macro_rules! impl_ord_methods_vector {
71+
{ $type:ty } => {
72+
impl<const LANES: usize> Simd<$type, LANES>
73+
where
74+
LaneCount<LANES>: SupportedLaneCount,
75+
{
76+
/// Returns the lane-wise minimum with `other`.
77+
#[must_use = "method returns a new vector and does not mutate the original value"]
78+
#[inline]
79+
pub fn min(self, other: Self) -> Self {
80+
self.lanes_gt(other).select(other, self)
81+
}
82+
83+
/// Returns the lane-wise maximum with `other`.
84+
#[must_use = "method returns a new vector and does not mutate the original value"]
85+
#[inline]
86+
pub fn max(self, other: Self) -> Self {
87+
self.lanes_lt(other).select(other, self)
88+
}
89+
90+
/// Restrict each lane to a certain interval.
91+
///
92+
/// For each lane, returns `max` if `self` is greater than `max`, and `min` if `self` is
93+
/// less than `min`. Otherwise returns `self`.
94+
///
95+
/// # Panics
96+
///
97+
/// Panics if `min > max` on any lane.
98+
#[must_use = "method returns a new vector and does not mutate the original value"]
99+
#[inline]
100+
pub fn clamp(self, min: Self, max: Self) -> Self {
101+
assert!(
102+
min.lanes_le(max).all(),
103+
"each lane in `min` must be less than or equal to the corresponding lane in `max`",
104+
);
105+
self.max(min).min(max)
106+
}
107+
}
108+
}
109+
}
110+
111+
impl_ord_methods_vector!(i8);
112+
impl_ord_methods_vector!(i16);
113+
impl_ord_methods_vector!(i32);
114+
impl_ord_methods_vector!(i64);
115+
impl_ord_methods_vector!(isize);
116+
impl_ord_methods_vector!(u8);
117+
impl_ord_methods_vector!(u16);
118+
impl_ord_methods_vector!(u32);
119+
impl_ord_methods_vector!(u64);
120+
impl_ord_methods_vector!(usize);

‎library/portable-simd/crates/core_simd/src/intrinsics.rs

+8-1
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,6 @@
1818
//!
1919
//! Unless stated otherwise, all intrinsics for binary operations require SIMD vectors of equal types and lengths.
2020
21-
2221
// These intrinsics aren't linked directly from LLVM and are mostly undocumented, however they are
2322
// mostly lowered to the matching LLVM instructions by the compiler in a fairly straightforward manner.
2423
// The associated LLVM instruction or intrinsic is documented alongside each Rust intrinsic function.
@@ -130,6 +129,14 @@ extern "platform-intrinsic" {
130129
pub(crate) fn simd_reduce_xor<T, U>(x: T) -> U;
131130

132131
// truncate integer vector to bitmask
132+
// `fn simd_bitmask(vector) -> unsigned integer` takes a vector of integers and
133+
// returns either an unsigned integer or array of `u8`.
134+
// Every element in the vector becomes a single bit in the returned bitmask.
135+
// If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
136+
// The bit order of the result depends on the byte endianness. LSB-first for little
137+
// endian and MSB-first for big endian.
138+
//
139+
// UB if called on a vector with values other than 0 and -1.
133140
#[allow(unused)]
134141
pub(crate) fn simd_bitmask<T, U>(x: T) -> U;
135142

‎library/portable-simd/crates/core_simd/src/lib.rs

+1-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
1-
#![cfg_attr(not(feature = "std"), no_std)]
1+
#![no_std]
22
#![feature(
3-
const_fn_trait_bound,
43
convert_float_to_int,
54
decl_macro,
65
intra_doc_pointers,

‎library/portable-simd/crates/core_simd/src/masks/to_bitmask.rs

+3
Original file line numberDiff line numberDiff line change
@@ -50,6 +50,9 @@ macro_rules! impl_integer_intrinsic {
5050
}
5151

5252
impl_integer_intrinsic! {
53+
unsafe impl ToBitMask<BitMask=u8> for Mask<_, 1>
54+
unsafe impl ToBitMask<BitMask=u8> for Mask<_, 2>
55+
unsafe impl ToBitMask<BitMask=u8> for Mask<_, 4>
5356
unsafe impl ToBitMask<BitMask=u8> for Mask<_, 8>
5457
unsafe impl ToBitMask<BitMask=u16> for Mask<_, 16>
5558
unsafe impl ToBitMask<BitMask=u32> for Mask<_, 32>

‎library/portable-simd/crates/core_simd/src/math.rs

+7-14
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,7 @@ macro_rules! impl_uint_arith {
1010
/// # Examples
1111
/// ```
1212
/// # #![feature(portable_simd)]
13-
/// # #[cfg(feature = "std")] use core_simd::Simd;
14-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
13+
/// # use core::simd::Simd;
1514
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
1615
/// let x = Simd::from_array([2, 1, 0, MAX]);
1716
/// let max = Simd::splat(MAX);
@@ -31,8 +30,7 @@ macro_rules! impl_uint_arith {
3130
/// # Examples
3231
/// ```
3332
/// # #![feature(portable_simd)]
34-
/// # #[cfg(feature = "std")] use core_simd::Simd;
35-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
33+
/// # use core::simd::Simd;
3634
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
3735
/// let x = Simd::from_array([2, 1, 0, MAX]);
3836
/// let max = Simd::splat(MAX);
@@ -58,8 +56,7 @@ macro_rules! impl_int_arith {
5856
/// # Examples
5957
/// ```
6058
/// # #![feature(portable_simd)]
61-
/// # #[cfg(feature = "std")] use core_simd::Simd;
62-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
59+
/// # use core::simd::Simd;
6360
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
6461
/// let x = Simd::from_array([MIN, 0, 1, MAX]);
6562
/// let max = Simd::splat(MAX);
@@ -79,8 +76,7 @@ macro_rules! impl_int_arith {
7976
/// # Examples
8077
/// ```
8178
/// # #![feature(portable_simd)]
82-
/// # #[cfg(feature = "std")] use core_simd::Simd;
83-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
79+
/// # use core::simd::Simd;
8480
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
8581
/// let x = Simd::from_array([MIN, -2, -1, MAX]);
8682
/// let max = Simd::splat(MAX);
@@ -100,8 +96,7 @@ macro_rules! impl_int_arith {
10096
/// # Examples
10197
/// ```
10298
/// # #![feature(portable_simd)]
103-
/// # #[cfg(feature = "std")] use core_simd::Simd;
104-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
99+
/// # use core::simd::Simd;
105100
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
106101
/// let xs = Simd::from_array([MIN, MIN +1, -5, 0]);
107102
/// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
@@ -119,8 +114,7 @@ macro_rules! impl_int_arith {
119114
/// # Examples
120115
/// ```
121116
/// # #![feature(portable_simd)]
122-
/// # #[cfg(feature = "std")] use core_simd::Simd;
123-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
117+
/// # use core::simd::Simd;
124118
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
125119
/// let xs = Simd::from_array([MIN, -2, 0, 3]);
126120
/// let unsat = xs.abs();
@@ -142,8 +136,7 @@ macro_rules! impl_int_arith {
142136
/// # Examples
143137
/// ```
144138
/// # #![feature(portable_simd)]
145-
/// # #[cfg(feature = "std")] use core_simd::Simd;
146-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
139+
/// # use core::simd::Simd;
147140
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
148141
/// let x = Simd::from_array([MIN, -2, 3, MAX]);
149142
/// let unsat = -x;

‎library/portable-simd/crates/core_simd/src/reduction.rs

+22-22
Original file line numberDiff line numberDiff line change
@@ -11,30 +11,30 @@ macro_rules! impl_integer_reductions {
1111
where
1212
LaneCount<LANES>: SupportedLaneCount,
1313
{
14-
/// Horizontal wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
14+
/// Reducing wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
1515
#[inline]
16-
pub fn horizontal_sum(self) -> $scalar {
16+
pub fn reduce_sum(self) -> $scalar {
1717
// Safety: `self` is an integer vector
1818
unsafe { simd_reduce_add_ordered(self, 0) }
1919
}
2020

21-
/// Horizontal wrapping multiply. Returns the product of the lanes of the vector, with wrapping multiplication.
21+
/// Reducing wrapping multiply. Returns the product of the lanes of the vector, with wrapping multiplication.
2222
#[inline]
23-
pub fn horizontal_product(self) -> $scalar {
23+
pub fn reduce_product(self) -> $scalar {
2424
// Safety: `self` is an integer vector
2525
unsafe { simd_reduce_mul_ordered(self, 1) }
2626
}
2727

28-
/// Horizontal maximum. Returns the maximum lane in the vector.
28+
/// Reducing maximum. Returns the maximum lane in the vector.
2929
#[inline]
30-
pub fn horizontal_max(self) -> $scalar {
30+
pub fn reduce_max(self) -> $scalar {
3131
// Safety: `self` is an integer vector
3232
unsafe { simd_reduce_max(self) }
3333
}
3434

35-
/// Horizontal minimum. Returns the minimum lane in the vector.
35+
/// Reducing minimum. Returns the minimum lane in the vector.
3636
#[inline]
37-
pub fn horizontal_min(self) -> $scalar {
37+
pub fn reduce_min(self) -> $scalar {
3838
// Safety: `self` is an integer vector
3939
unsafe { simd_reduce_min(self) }
4040
}
@@ -60,9 +60,9 @@ macro_rules! impl_float_reductions {
6060
LaneCount<LANES>: SupportedLaneCount,
6161
{
6262

63-
/// Horizontal add. Returns the sum of the lanes of the vector.
63+
/// Reducing add. Returns the sum of the lanes of the vector.
6464
#[inline]
65-
pub fn horizontal_sum(self) -> $scalar {
65+
pub fn reduce_sum(self) -> $scalar {
6666
// LLVM sum is inaccurate on i586
6767
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
6868
self.as_array().iter().sum()
@@ -72,9 +72,9 @@ macro_rules! impl_float_reductions {
7272
}
7373
}
7474

75-
/// Horizontal multiply. Returns the product of the lanes of the vector.
75+
/// Reducing multiply. Returns the product of the lanes of the vector.
7676
#[inline]
77-
pub fn horizontal_product(self) -> $scalar {
77+
pub fn reduce_product(self) -> $scalar {
7878
// LLVM product is inaccurate on i586
7979
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
8080
self.as_array().iter().product()
@@ -84,22 +84,22 @@ macro_rules! impl_float_reductions {
8484
}
8585
}
8686

87-
/// Horizontal maximum. Returns the maximum lane in the vector.
87+
/// Reducing maximum. Returns the maximum lane in the vector.
8888
///
8989
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
9090
/// return either. This function will not return `NaN` unless all lanes are `NaN`.
9191
#[inline]
92-
pub fn horizontal_max(self) -> $scalar {
92+
pub fn reduce_max(self) -> $scalar {
9393
// Safety: `self` is a float vector
9494
unsafe { simd_reduce_max(self) }
9595
}
9696

97-
/// Horizontal minimum. Returns the minimum lane in the vector.
97+
/// Reducing minimum. Returns the minimum lane in the vector.
9898
///
9999
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
100100
/// return either. This function will not return `NaN` unless all lanes are `NaN`.
101101
#[inline]
102-
pub fn horizontal_min(self) -> $scalar {
102+
pub fn reduce_min(self) -> $scalar {
103103
// Safety: `self` is a float vector
104104
unsafe { simd_reduce_min(self) }
105105
}
@@ -116,10 +116,10 @@ where
116116
T: SimdElement + BitAnd<T, Output = T>,
117117
LaneCount<LANES>: SupportedLaneCount,
118118
{
119-
/// Horizontal bitwise "and". Returns the cumulative bitwise "and" across the lanes of
119+
/// Reducing bitwise "and". Returns the cumulative bitwise "and" across the lanes of
120120
/// the vector.
121121
#[inline]
122-
pub fn horizontal_and(self) -> T {
122+
pub fn reduce_and(self) -> T {
123123
unsafe { simd_reduce_and(self) }
124124
}
125125
}
@@ -130,10 +130,10 @@ where
130130
T: SimdElement + BitOr<T, Output = T>,
131131
LaneCount<LANES>: SupportedLaneCount,
132132
{
133-
/// Horizontal bitwise "or". Returns the cumulative bitwise "or" across the lanes of
133+
/// Reducing bitwise "or". Returns the cumulative bitwise "or" across the lanes of
134134
/// the vector.
135135
#[inline]
136-
pub fn horizontal_or(self) -> T {
136+
pub fn reduce_or(self) -> T {
137137
unsafe { simd_reduce_or(self) }
138138
}
139139
}
@@ -144,10 +144,10 @@ where
144144
T: SimdElement + BitXor<T, Output = T>,
145145
LaneCount<LANES>: SupportedLaneCount,
146146
{
147-
/// Horizontal bitwise "xor". Returns the cumulative bitwise "xor" across the lanes of
147+
/// Reducing bitwise "xor". Returns the cumulative bitwise "xor" across the lanes of
148148
/// the vector.
149149
#[inline]
150-
pub fn horizontal_xor(self) -> T {
150+
pub fn reduce_xor(self) -> T {
151151
unsafe { simd_reduce_xor(self) }
152152
}
153153
}

‎library/portable-simd/crates/core_simd/src/select.rs

+2-4
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,7 @@ where
1414
/// # Examples
1515
/// ```
1616
/// # #![feature(portable_simd)]
17-
/// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
18-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
17+
/// # use core::simd::{Simd, Mask};
1918
/// let a = Simd::from_array([0, 1, 2, 3]);
2019
/// let b = Simd::from_array([4, 5, 6, 7]);
2120
/// let mask = Mask::from_array([true, false, false, true]);
@@ -45,8 +44,7 @@ where
4544
/// # Examples
4645
/// ```
4746
/// # #![feature(portable_simd)]
48-
/// # #[cfg(feature = "std")] use core_simd::Mask;
49-
/// # #[cfg(not(feature = "std"))] use core::simd::Mask;
47+
/// # use core::simd::Mask;
5048
/// let a = Mask::<i32, 4>::from_array([true, true, false, false]);
5149
/// let b = Mask::<i32, 4>::from_array([false, false, true, true]);
5250
/// let mask = Mask::<i32, 4>::from_array([true, false, false, true]);

‎library/portable-simd/crates/core_simd/src/swizzle.rs

+4-8
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,7 @@ use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
1212
/// ## One source vector
1313
/// ```
1414
/// # #![feature(portable_simd)]
15-
/// # #[cfg(feature = "std")] use core_simd::{Simd, simd_swizzle};
16-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, simd_swizzle};
15+
/// # use core::simd::{Simd, simd_swizzle};
1716
/// let v = Simd::<f32, 4>::from_array([0., 1., 2., 3.]);
1817
///
1918
/// // Keeping the same size
@@ -28,8 +27,7 @@ use crate::simd::{LaneCount, Simd, SimdElement, SupportedLaneCount};
2827
/// ## Two source vectors
2928
/// ```
3029
/// # #![feature(portable_simd)]
31-
/// # #[cfg(feature = "std")] use core_simd::{Simd, simd_swizzle, Which};
32-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, simd_swizzle, Which};
30+
/// # use core::simd::{Simd, simd_swizzle, Which};
3331
/// use Which::*;
3432
/// let a = Simd::<f32, 4>::from_array([0., 1., 2., 3.]);
3533
/// let b = Simd::<f32, 4>::from_array([4., 5., 6., 7.]);
@@ -273,8 +271,7 @@ where
273271
///
274272
/// ```
275273
/// #![feature(portable_simd)]
276-
/// # #[cfg(feature = "std")] use core_simd::Simd;
277-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
274+
/// # use core::simd::Simd;
278275
/// let a = Simd::from_array([0, 1, 2, 3]);
279276
/// let b = Simd::from_array([4, 5, 6, 7]);
280277
/// let (x, y) = a.interleave(b);
@@ -337,8 +334,7 @@ where
337334
///
338335
/// ```
339336
/// #![feature(portable_simd)]
340-
/// # #[cfg(feature = "std")] use core_simd::Simd;
341-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
337+
/// # use core::simd::Simd;
342338
/// let a = Simd::from_array([0, 4, 1, 5]);
343339
/// let b = Simd::from_array([2, 6, 3, 7]);
344340
/// let (x, y) = a.deinterleave(b);

‎library/portable-simd/crates/core_simd/src/vector.rs

+8-16
Original file line numberDiff line numberDiff line change
@@ -153,8 +153,7 @@ where
153153
/// # Examples
154154
/// ```
155155
/// # #![feature(portable_simd)]
156-
/// # #[cfg(feature = "std")] use core_simd::Simd;
157-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
156+
/// # use core::simd::Simd;
158157
/// let floats: Simd<f32, 4> = Simd::from_array([1.9, -4.5, f32::INFINITY, f32::NAN]);
159158
/// let ints = floats.cast::<i32>();
160159
/// assert_eq!(ints, Simd::from_array([1, -4, i32::MAX, 0]));
@@ -180,8 +179,7 @@ where
180179
/// # Examples
181180
/// ```
182181
/// # #![feature(portable_simd)]
183-
/// # #[cfg(feature = "std")] use core_simd::Simd;
184-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
182+
/// # use core::simd::Simd;
185183
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
186184
/// let idxs = Simd::from_array([9, 3, 0, 5]);
187185
/// let alt = Simd::from_array([-5, -4, -3, -2]);
@@ -201,8 +199,7 @@ where
201199
/// # Examples
202200
/// ```
203201
/// # #![feature(portable_simd)]
204-
/// # #[cfg(feature = "std")] use core_simd::Simd;
205-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
202+
/// # use core::simd::Simd;
206203
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
207204
/// let idxs = Simd::from_array([9, 3, 0, 5]);
208205
///
@@ -225,8 +222,7 @@ where
225222
/// # Examples
226223
/// ```
227224
/// # #![feature(portable_simd)]
228-
/// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
229-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
225+
/// # use core::simd::{Simd, Mask};
230226
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
231227
/// let idxs = Simd::from_array([9, 3, 0, 5]);
232228
/// let alt = Simd::from_array([-5, -4, -3, -2]);
@@ -260,8 +256,7 @@ where
260256
/// # Examples
261257
/// ```
262258
/// # #![feature(portable_simd)]
263-
/// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
264-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
259+
/// # use core::simd::{Simd, Mask};
265260
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
266261
/// let idxs = Simd::from_array([9, 3, 0, 5]);
267262
/// let alt = Simd::from_array([-5, -4, -3, -2]);
@@ -296,8 +291,7 @@ where
296291
/// # Examples
297292
/// ```
298293
/// # #![feature(portable_simd)]
299-
/// # #[cfg(feature = "std")] use core_simd::Simd;
300-
/// # #[cfg(not(feature = "std"))] use core::simd::Simd;
294+
/// # use core::simd::Simd;
301295
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
302296
/// let idxs = Simd::from_array([9, 3, 0, 0]);
303297
/// let vals = Simd::from_array([-27, 82, -41, 124]);
@@ -319,8 +313,7 @@ where
319313
/// # Examples
320314
/// ```
321315
/// # #![feature(portable_simd)]
322-
/// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
323-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
316+
/// # use core::simd::{Simd, Mask};
324317
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
325318
/// let idxs = Simd::from_array([9, 3, 0, 0]);
326319
/// let vals = Simd::from_array([-27, 82, -41, 124]);
@@ -354,8 +347,7 @@ where
354347
/// # Examples
355348
/// ```
356349
/// # #![feature(portable_simd)]
357-
/// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
358-
/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
350+
/// # use core::simd::{Simd, Mask};
359351
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
360352
/// let idxs = Simd::from_array([9, 3, 0, 0]);
361353
/// let vals = Simd::from_array([-27, 82, -41, 124]);
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,32 @@
11
#![feature(portable_simd)]
2+
use core_simd::i16x2;
23

34
#[macro_use]
45
mod ops_macros;
56
impl_signed_tests! { i16 }
7+
8+
#[test]
9+
fn max_is_not_lexicographic() {
10+
let a = i16x2::splat(10);
11+
let b = i16x2::from_array([-4, 12]);
12+
assert_eq!(a.max(b), i16x2::from_array([10, 12]));
13+
}
14+
15+
#[test]
16+
fn min_is_not_lexicographic() {
17+
let a = i16x2::splat(10);
18+
let b = i16x2::from_array([12, -4]);
19+
assert_eq!(a.min(b), i16x2::from_array([10, -4]));
20+
}
21+
22+
#[test]
23+
fn clamp_is_not_lexicographic() {
24+
let a = i16x2::splat(10);
25+
let lo = i16x2::from_array([-12, -4]);
26+
let up = i16x2::from_array([-4, 12]);
27+
assert_eq!(a.clamp(lo, up), i16x2::from_array([-4, 10]));
28+
29+
let x = i16x2::from_array([1, 10]);
30+
let y = x.clamp(i16x2::splat(0), i16x2::splat(9));
31+
assert_eq!(y, i16x2::from_array([1, 9]));
32+
}

‎library/portable-simd/crates/core_simd/tests/ops_macros.rs

+51-22
Original file line numberDiff line numberDiff line change
@@ -94,70 +94,70 @@ macro_rules! impl_binary_checked_op_test {
9494
macro_rules! impl_common_integer_tests {
9595
{ $vector:ident, $scalar:ident } => {
9696
test_helpers::test_lanes! {
97-
fn horizontal_sum<const LANES: usize>() {
97+
fn reduce_sum<const LANES: usize>() {
9898
test_helpers::test_1(&|x| {
9999
test_helpers::prop_assert_biteq! (
100-
$vector::<LANES>::from_array(x).horizontal_sum(),
100+
$vector::<LANES>::from_array(x).reduce_sum(),
101101
x.iter().copied().fold(0 as $scalar, $scalar::wrapping_add),
102102
);
103103
Ok(())
104104
});
105105
}
106106

107-
fn horizontal_product<const LANES: usize>() {
107+
fn reduce_product<const LANES: usize>() {
108108
test_helpers::test_1(&|x| {
109109
test_helpers::prop_assert_biteq! (
110-
$vector::<LANES>::from_array(x).horizontal_product(),
110+
$vector::<LANES>::from_array(x).reduce_product(),
111111
x.iter().copied().fold(1 as $scalar, $scalar::wrapping_mul),
112112
);
113113
Ok(())
114114
});
115115
}
116116

117-
fn horizontal_and<const LANES: usize>() {
117+
fn reduce_and<const LANES: usize>() {
118118
test_helpers::test_1(&|x| {
119119
test_helpers::prop_assert_biteq! (
120-
$vector::<LANES>::from_array(x).horizontal_and(),
120+
$vector::<LANES>::from_array(x).reduce_and(),
121121
x.iter().copied().fold(-1i8 as $scalar, <$scalar as core::ops::BitAnd>::bitand),
122122
);
123123
Ok(())
124124
});
125125
}
126126

127-
fn horizontal_or<const LANES: usize>() {
127+
fn reduce_or<const LANES: usize>() {
128128
test_helpers::test_1(&|x| {
129129
test_helpers::prop_assert_biteq! (
130-
$vector::<LANES>::from_array(x).horizontal_or(),
130+
$vector::<LANES>::from_array(x).reduce_or(),
131131
x.iter().copied().fold(0 as $scalar, <$scalar as core::ops::BitOr>::bitor),
132132
);
133133
Ok(())
134134
});
135135
}
136136

137-
fn horizontal_xor<const LANES: usize>() {
137+
fn reduce_xor<const LANES: usize>() {
138138
test_helpers::test_1(&|x| {
139139
test_helpers::prop_assert_biteq! (
140-
$vector::<LANES>::from_array(x).horizontal_xor(),
140+
$vector::<LANES>::from_array(x).reduce_xor(),
141141
x.iter().copied().fold(0 as $scalar, <$scalar as core::ops::BitXor>::bitxor),
142142
);
143143
Ok(())
144144
});
145145
}
146146

147-
fn horizontal_max<const LANES: usize>() {
147+
fn reduce_max<const LANES: usize>() {
148148
test_helpers::test_1(&|x| {
149149
test_helpers::prop_assert_biteq! (
150-
$vector::<LANES>::from_array(x).horizontal_max(),
150+
$vector::<LANES>::from_array(x).reduce_max(),
151151
x.iter().copied().max().unwrap(),
152152
);
153153
Ok(())
154154
});
155155
}
156156

157-
fn horizontal_min<const LANES: usize>() {
157+
fn reduce_min<const LANES: usize>() {
158158
test_helpers::test_1(&|x| {
159159
test_helpers::prop_assert_biteq! (
160-
$vector::<LANES>::from_array(x).horizontal_min(),
160+
$vector::<LANES>::from_array(x).reduce_min(),
161161
x.iter().copied().min().unwrap(),
162162
);
163163
Ok(())
@@ -222,6 +222,35 @@ macro_rules! impl_signed_tests {
222222
assert_eq!(a % b, Vector::<LANES>::splat(0));
223223
}
224224

225+
fn min<const LANES: usize>() {
226+
let a = Vector::<LANES>::splat(Scalar::MIN);
227+
let b = Vector::<LANES>::splat(0);
228+
assert_eq!(a.min(b), a);
229+
let a = Vector::<LANES>::splat(Scalar::MAX);
230+
let b = Vector::<LANES>::splat(0);
231+
assert_eq!(a.min(b), b);
232+
}
233+
234+
fn max<const LANES: usize>() {
235+
let a = Vector::<LANES>::splat(Scalar::MIN);
236+
let b = Vector::<LANES>::splat(0);
237+
assert_eq!(a.max(b), b);
238+
let a = Vector::<LANES>::splat(Scalar::MAX);
239+
let b = Vector::<LANES>::splat(0);
240+
assert_eq!(a.max(b), a);
241+
}
242+
243+
fn clamp<const LANES: usize>() {
244+
let min = Vector::<LANES>::splat(Scalar::MIN);
245+
let max = Vector::<LANES>::splat(Scalar::MAX);
246+
let zero = Vector::<LANES>::splat(0);
247+
let one = Vector::<LANES>::splat(1);
248+
let negone = Vector::<LANES>::splat(-1);
249+
assert_eq!(zero.clamp(min, max), zero);
250+
assert_eq!(zero.clamp(min, one), zero);
251+
assert_eq!(zero.clamp(one, max), one);
252+
assert_eq!(zero.clamp(min, negone), negone);
253+
}
225254
}
226255

227256
test_helpers::test_lanes_panic! {
@@ -499,29 +528,29 @@ macro_rules! impl_float_tests {
499528
})
500529
}
501530

502-
fn horizontal_sum<const LANES: usize>() {
531+
fn reduce_sum<const LANES: usize>() {
503532
test_helpers::test_1(&|x| {
504533
test_helpers::prop_assert_biteq! (
505-
Vector::<LANES>::from_array(x).horizontal_sum(),
534+
Vector::<LANES>::from_array(x).reduce_sum(),
506535
x.iter().sum(),
507536
);
508537
Ok(())
509538
});
510539
}
511540

512-
fn horizontal_product<const LANES: usize>() {
541+
fn reduce_product<const LANES: usize>() {
513542
test_helpers::test_1(&|x| {
514543
test_helpers::prop_assert_biteq! (
515-
Vector::<LANES>::from_array(x).horizontal_product(),
544+
Vector::<LANES>::from_array(x).reduce_product(),
516545
x.iter().product(),
517546
);
518547
Ok(())
519548
});
520549
}
521550

522-
fn horizontal_max<const LANES: usize>() {
551+
fn reduce_max<const LANES: usize>() {
523552
test_helpers::test_1(&|x| {
524-
let vmax = Vector::<LANES>::from_array(x).horizontal_max();
553+
let vmax = Vector::<LANES>::from_array(x).reduce_max();
525554
let smax = x.iter().copied().fold(Scalar::NAN, Scalar::max);
526555
// 0 and -0 are treated the same
527556
if !(x.contains(&0.) && x.contains(&-0.) && vmax.abs() == 0. && smax.abs() == 0.) {
@@ -531,9 +560,9 @@ macro_rules! impl_float_tests {
531560
});
532561
}
533562

534-
fn horizontal_min<const LANES: usize>() {
563+
fn reduce_min<const LANES: usize>() {
535564
test_helpers::test_1(&|x| {
536-
let vmax = Vector::<LANES>::from_array(x).horizontal_min();
565+
let vmax = Vector::<LANES>::from_array(x).reduce_min();
537566
let smax = x.iter().copied().fold(Scalar::NAN, Scalar::min);
538567
// 0 and -0 are treated the same
539568
if !(x.contains(&0.) && x.contains(&-0.) && vmax.abs() == 0. && smax.abs() == 0.) {

‎library/portable-simd/crates/core_simd/tests/round.rs

-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ macro_rules! float_rounding_test {
99
type Scalar = $scalar;
1010
type IntScalar = $int_scalar;
1111

12-
#[cfg(feature = "std")]
1312
test_helpers::test_lanes! {
1413
fn ceil<const LANES: usize>() {
1514
test_helpers::test_unary_elementwise(

‎library/portable-simd/crates/std_float/Cargo.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ edition = "2021"
66
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
77

88
[dependencies]
9-
core_simd = { path = "../core_simd" }
9+
core_simd = { path = "../core_simd", default-features = false }
1010

1111
[features]
1212
default = ["as_crate"]

‎library/portable-simd/crates/test_helpers/src/lib.rs

+17-3
Original file line numberDiff line numberDiff line change
@@ -77,19 +77,29 @@ impl<T: core::fmt::Debug + DefaultStrategy, const LANES: usize> DefaultStrategy
7777
}
7878
}
7979

80+
#[cfg(not(miri))]
81+
fn make_runner() -> proptest::test_runner::TestRunner {
82+
Default::default()
83+
}
84+
#[cfg(miri)]
85+
fn make_runner() -> proptest::test_runner::TestRunner {
86+
// Only run a few tests on Miri
87+
proptest::test_runner::TestRunner::new(proptest::test_runner::Config::with_cases(4))
88+
}
89+
8090
/// Test a function that takes a single value.
8191
pub fn test_1<A: core::fmt::Debug + DefaultStrategy>(
8292
f: &dyn Fn(A) -> proptest::test_runner::TestCaseResult,
8393
) {
84-
let mut runner = proptest::test_runner::TestRunner::default();
94+
let mut runner = make_runner();
8595
runner.run(&A::default_strategy(), f).unwrap();
8696
}
8797

8898
/// Test a function that takes two values.
8999
pub fn test_2<A: core::fmt::Debug + DefaultStrategy, B: core::fmt::Debug + DefaultStrategy>(
90100
f: &dyn Fn(A, B) -> proptest::test_runner::TestCaseResult,
91101
) {
92-
let mut runner = proptest::test_runner::TestRunner::default();
102+
let mut runner = make_runner();
93103
runner
94104
.run(&(A::default_strategy(), B::default_strategy()), |(a, b)| {
95105
f(a, b)
@@ -105,7 +115,7 @@ pub fn test_3<
105115
>(
106116
f: &dyn Fn(A, B, C) -> proptest::test_runner::TestCaseResult,
107117
) {
108-
let mut runner = proptest::test_runner::TestRunner::default();
118+
let mut runner = make_runner();
109119
runner
110120
.run(
111121
&(
@@ -361,24 +371,28 @@ macro_rules! test_lanes {
361371

362372
#[test]
363373
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
374+
#[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
364375
fn lanes_8() {
365376
implementation::<8>();
366377
}
367378

368379
#[test]
369380
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
381+
#[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
370382
fn lanes_16() {
371383
implementation::<16>();
372384
}
373385

374386
#[test]
375387
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
388+
#[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
376389
fn lanes_32() {
377390
implementation::<32>();
378391
}
379392

380393
#[test]
381394
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
395+
#[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
382396
fn lanes_64() {
383397
implementation::<64>();
384398
}

0 commit comments

Comments
 (0)
Please sign in to comment.