@@ -1532,7 +1532,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)
 #[inline]
@@ -1545,7 +1545,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)
 #[inline]
@@ -1558,7 +1558,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)
 #[inline]
@@ -1571,7 +1571,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)
 #[inline]
@@ -1584,7 +1584,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)
 #[inline]
@@ -1597,7 +1597,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
     simd_gt(a, b)
 }

-/// Compare unsigned highe
+/// Compare unsigned greater than
 ///
 /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)
 #[inline]
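
Aside, for readers skimming the hunks above: `vcgt_u8` and its siblings do a lane-wise unsigned greater-than, returning an all-ones lane where `a > b` and zero otherwise, which is what the corrected doc comment now says. A minimal usage sketch, assuming an AArch64 target where NEON is available (the `demo` function and its constant inputs are illustrative, not part of the patch):

#[cfg(target_arch = "aarch64")]
fn demo() {
    use core::arch::aarch64::{vcgt_u8, vdup_n_u8, vget_lane_u8};
    // Safety: these NEON intrinsics are available on all AArch64 targets.
    unsafe {
        let a = vdup_n_u8(3); // every lane = 3
        let b = vdup_n_u8(1); // every lane = 1
        let mask = vcgt_u8(a, b); // 3 > 1 in every lane, so every lane = 0xFF
        assert_eq!(vget_lane_u8::<0>(mask), 0xFF);
    }
}
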
@@ -26185,7 +26185,7 @@ vrshlq_u64_(a, b)
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
     static_assert!(N >= 1 && N <= 8);
-    vrshl_s8(a, vdup_n_s8((-N) as _))
+    vrshl_s8(a, vdup_n_s8(-N as _))
 }

 /// Signed rounding shift right
@@ -26200,7 +26200,7 @@ pub unsafe fn vrshr_n_s8<const N: i32>(a: int8x8_t) -> int8x8_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
     static_assert!(N >= 1 && N <= 8);
-    vrshlq_s8(a, vdupq_n_s8((-N) as _))
+    vrshlq_s8(a, vdupq_n_s8(-N as _))
 }

 /// Signed rounding shift right
@@ -26215,7 +26215,7 @@ pub unsafe fn vrshrq_n_s8<const N: i32>(a: int8x16_t) -> int8x16_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
     static_assert!(N >= 1 && N <= 16);
-    vrshl_s16(a, vdup_n_s16((-N) as _))
+    vrshl_s16(a, vdup_n_s16(-N as _))
 }

 /// Signed rounding shift right
@@ -26230,7 +26230,7 @@ pub unsafe fn vrshr_n_s16<const N: i32>(a: int16x4_t) -> int16x4_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
     static_assert!(N >= 1 && N <= 16);
-    vrshlq_s16(a, vdupq_n_s16((-N) as _))
+    vrshlq_s16(a, vdupq_n_s16(-N as _))
 }

 /// Signed rounding shift right
@@ -26245,7 +26245,7 @@ pub unsafe fn vrshrq_n_s16<const N: i32>(a: int16x8_t) -> int16x8_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
     static_assert!(N >= 1 && N <= 32);
-    vrshl_s32(a, vdup_n_s32((-N) as _))
+    vrshl_s32(a, vdup_n_s32(-N as _))
 }

 /// Signed rounding shift right
@@ -26260,7 +26260,7 @@ pub unsafe fn vrshr_n_s32<const N: i32>(a: int32x2_t) -> int32x2_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
     static_assert!(N >= 1 && N <= 32);
-    vrshlq_s32(a, vdupq_n_s32((-N) as _))
+    vrshlq_s32(a, vdupq_n_s32(-N as _))
 }

 /// Signed rounding shift right
@@ -26275,7 +26275,7 @@ pub unsafe fn vrshrq_n_s32<const N: i32>(a: int32x4_t) -> int32x4_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
     static_assert!(N >= 1 && N <= 64);
-    vrshl_s64(a, vdup_n_s64((-N) as _))
+    vrshl_s64(a, vdup_n_s64(-N as _))
 }

 /// Signed rounding shift right
@@ -26290,7 +26290,7 @@ pub unsafe fn vrshr_n_s64<const N: i32>(a: int64x1_t) -> int64x1_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
     static_assert!(N >= 1 && N <= 64);
-    vrshlq_s64(a, vdupq_n_s64((-N) as _))
+    vrshlq_s64(a, vdupq_n_s64(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26305,7 +26305,7 @@ pub unsafe fn vrshrq_n_s64<const N: i32>(a: int64x2_t) -> int64x2_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
     static_assert!(N >= 1 && N <= 8);
-    vrshl_u8(a, vdup_n_s8((-N) as _))
+    vrshl_u8(a, vdup_n_s8(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26320,7 +26320,7 @@ pub unsafe fn vrshr_n_u8<const N: i32>(a: uint8x8_t) -> uint8x8_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
     static_assert!(N >= 1 && N <= 8);
-    vrshlq_u8(a, vdupq_n_s8((-N) as _))
+    vrshlq_u8(a, vdupq_n_s8(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26335,7 +26335,7 @@ pub unsafe fn vrshrq_n_u8<const N: i32>(a: uint8x16_t) -> uint8x16_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
     static_assert!(N >= 1 && N <= 16);
-    vrshl_u16(a, vdup_n_s16((-N) as _))
+    vrshl_u16(a, vdup_n_s16(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26350,7 +26350,7 @@ pub unsafe fn vrshr_n_u16<const N: i32>(a: uint16x4_t) -> uint16x4_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
     static_assert!(N >= 1 && N <= 16);
-    vrshlq_u16(a, vdupq_n_s16((-N) as _))
+    vrshlq_u16(a, vdupq_n_s16(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26365,7 +26365,7 @@ pub unsafe fn vrshrq_n_u16<const N: i32>(a: uint16x8_t) -> uint16x8_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
     static_assert!(N >= 1 && N <= 32);
-    vrshl_u32(a, vdup_n_s32((-N) as _))
+    vrshl_u32(a, vdup_n_s32(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26380,7 +26380,7 @@ pub unsafe fn vrshr_n_u32<const N: i32>(a: uint32x2_t) -> uint32x2_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
     static_assert!(N >= 1 && N <= 32);
-    vrshlq_u32(a, vdupq_n_s32((-N) as _))
+    vrshlq_u32(a, vdupq_n_s32(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26395,7 +26395,7 @@ pub unsafe fn vrshrq_n_u32<const N: i32>(a: uint32x4_t) -> uint32x4_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
     static_assert!(N >= 1 && N <= 64);
-    vrshl_u64(a, vdup_n_s64((-N) as _))
+    vrshl_u64(a, vdup_n_s64(-N as _))
 }

 /// Unsigned rounding shift right
@@ -26410,7 +26410,7 @@ pub unsafe fn vrshr_n_u64<const N: i32>(a: uint64x1_t) -> uint64x1_t {
 #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))]
 pub unsafe fn vrshrq_n_u64<const N: i32>(a: uint64x2_t) -> uint64x2_t {
     static_assert!(N >= 1 && N <= 64);
-    vrshlq_u64(a, vdupq_n_s64((-N) as _))
+    vrshlq_u64(a, vdupq_n_s64(-N as _))
 }

 /// Rounding shift right narrow
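
For context on the functions touched above (not part of the patch): `vrshr_n_*` is a rounding shift right by the constant `N`, implemented here as a rounding shift left (`vrshl*`) by `-N`. Per Arm's documentation, the lane-wise result is `(x + (1 << (N - 1))) >> N`. A scalar sketch of that rounding rule, using our own hypothetical helper:

// Scalar model of the lane-wise rounding used by vrshr_n_* (illustrative only).
fn rshr_round(x: i32, n: u32) -> i32 {
    assert!((1..=31).contains(&n));
    (x + (1 << (n - 1))) >> n
}

fn main() {
    assert_eq!(rshr_round(5, 2), 1); // 5/4 = 1.25 rounds down to 1
    assert_eq!(rshr_round(6, 2), 2); // 6/4 = 1.5 rounds up to 2
    assert_eq!(rshr_round(7, 2), 2); // 7/4 = 1.75 rounds up to 2
}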
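On the parenthesis change itself: in Rust, unary minus binds more tightly than `as`, so `-N as _` already parses as `(-N) as _`; the patch only drops redundant parentheses and is behavior-preserving. A standalone check (our own `main`, purely illustrative):

fn main() {
    let n: i32 = 8; // stands in for the const generic N (1..=8 in the s8 case)
    // Unary `-` has higher precedence than `as`, so both expressions parse alike.
    let with_parens = (-n) as i8;
    let without_parens = -n as i8;
    assert_eq!(with_parens, without_parens);
    assert_eq!(without_parens, -8i8);
}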